diff --git a/apis/v1alpha1/common.go b/apis/v1alpha1/common.go index ac6dc358..d2039572 100644 --- a/apis/v1alpha1/common.go +++ b/apis/v1alpha1/common.go @@ -19,6 +19,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +// StorageRetainPolicyType is the type of the storage retain policy. type StorageRetainPolicyType string const ( @@ -34,19 +35,19 @@ const ( type Phase string const ( - // PhaseStarting means the controller start to create cluster. + // PhaseStarting means the controller starts to create the cluster or standalone. PhaseStarting Phase = "Starting" - // PhaseRunning means all the components of cluster is ready. + // PhaseRunning means all the components of the cluster or standalone are ready. PhaseRunning Phase = "Running" - // PhaseUpdating means the cluster is updating. + // PhaseUpdating means the cluster or standalone is updating. PhaseUpdating Phase = "Updating" // PhaseError means some kind of error happen in reconcile. PhaseError Phase = "Error" - // PhaseTerminating means the cluster is terminating. + // PhaseTerminating means the cluster or standalone is terminating. PhaseTerminating Phase = "Terminating" ) @@ -54,20 +55,29 @@ const ( type ComponentKind string const ( + // FrontendComponentKind is the frontend component kind. FrontendComponentKind ComponentKind = "frontend" + + // DatanodeComponentKind is the datanode component kind. DatanodeComponentKind ComponentKind = "datanode" - MetaComponentKind ComponentKind = "meta" + + // MetaComponentKind is the meta component kind. + MetaComponentKind ComponentKind = "meta" + + // FlownodeComponentKind is the flownode component kind. FlownodeComponentKind ComponentKind = "flownode" - StandaloneKind ComponentKind = "standalone" + + // StandaloneKind is the standalone component kind. + StandaloneKind ComponentKind = "standalone" ) // SlimPodSpec is a slimmed down version of corev1.PodSpec. -// Most of the fields in SlimPodSpec are copied from corev1.PodSpec. 
+// Most of the fields in SlimPodSpec are copied from `corev1.PodSpec`. type SlimPodSpec struct { // NodeSelector is a selector which must be true for the pod to fit on a node. // Selector which must match a node's labels for the pod to be scheduled on that node. - // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - // NodeSelector field is from 'corev1.PodSpec.NodeSelector'. + // More info: `https://kubernetes.io/docs/concepts/configuration/assign-pod-node/` + // NodeSelector field is from `corev1.PodSpec.NodeSelector`. // +optional NodeSelector map[string]string `json:"nodeSelector,omitempty"` @@ -83,16 +93,16 @@ type SlimPodSpec struct { // in a similar fashion. // Init containers cannot currently be added or removed. // Cannot be updated. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - // InitContainers field is from 'corev1.PodSpec.InitContainers'. + // More info: `https://kubernetes.io/docs/concepts/workloads/pods/init-containers/` + // InitContainers field is from `corev1.PodSpec.InitContainers`. // +optional InitContainers []corev1.Container `json:"initContainers,omitempty"` // Restart policy for all containers within the pod. - // One of Always, OnFailure, Never. - // Default to Always. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy - // RestartPolicy field is from 'corev1.PodSpec.RestartPolicy'. + // One of `Always`, `OnFailure`, `Never`. + // Default to `Always`. + // More info: `https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy` + // RestartPolicy field is from `corev1.PodSpec.RestartPolicy`. // +optional RestartPolicy corev1.RestartPolicy `json:"restartPolicy,omitempty"` @@ -104,49 +114,49 @@ type SlimPodSpec struct { // a termination signal and the time when the processes are forcibly halted with a kill signal. // Set this value longer than the expected cleanup time for your process. 
// Defaults to 30 seconds. - // TerminationGracePeriodSeconds field is from 'corev1.PodSpec.TerminationGracePeriodSeconds'. + // TerminationGracePeriodSeconds field is from `corev1.PodSpec.TerminationGracePeriodSeconds`. // +optional TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"` // Optional duration in seconds the pod may be active on the node relative to // StartTime before the system will actively try to mark it failed and kill associated containers. // Value must be a positive integer. - // ActiveDeadlineSeconds field is from 'corev1.PodSpec.ActiveDeadlineSeconds'. + // ActiveDeadlineSeconds field is from `corev1.PodSpec.ActiveDeadlineSeconds`. // +optional ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"` // Set DNS policy for the pod. - // Defaults to "ClusterFirst". - // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + // Defaults to `ClusterFirst`. + // Valid values are `ClusterFirstWithHostNet`, `ClusterFirst`, `Default` or `None`. // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. // To have DNS options set along with hostNetwork, you have to specify DNS policy - // explicitly to 'ClusterFirstWithHostNet'. - // DNSPolicy field is from 'corev1.PodSpec.DNSPolicy'. + // explicitly to `ClusterFirstWithHostNet`. + // DNSPolicy field is from `corev1.PodSpec.DNSPolicy`. // +optional DNSPolicy corev1.DNSPolicy `json:"dnsPolicy,omitempty"` // ServiceAccountName is the name of the ServiceAccount to use to run this pod. - // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - // ServiceAccountName field is from 'corev1.PodSpec.ServiceAccountName'. + // More info: `https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/` + // ServiceAccountName field is from `corev1.PodSpec.ServiceAccountName`. 
// +optional ServiceAccountName string `json:"serviceAccountName,omitempty"` // Host networking requested for this pod. Use the host's network namespace. // If this option is set, the ports that will be used must be specified. - // Default to false. - // HostNetwork field is from 'corev1.PodSpec.HostNetwork'. + // Default to `false`. + // HostNetwork field is from `corev1.PodSpec.HostNetwork`. // +optional HostNetwork bool `json:"hostNetwork,omitempty"` // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. // If specified, these secrets will be passed to individual puller implementations for them to use. - // More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod - // ImagePullSecrets field is from 'corev1.PodSpec.ImagePullSecrets'. + // More info: `https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod` + // ImagePullSecrets field is from `corev1.PodSpec.ImagePullSecrets`. // +optional ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"` // If specified, the pod's scheduling constraints - // Affinity field is from 'corev1.PodSpec.Affinity'. + // Affinity field is from `corev1.PodSpec.Affinity`. // +optional Affinity *corev1.Affinity `json:"affinity,omitempty"` @@ -156,11 +166,11 @@ type SlimPodSpec struct { // If specified, the pod will be dispatched by specified scheduler. // If not specified, the pod will be dispatched by default scheduler. - // SchedulerName field is from 'corev1.PodSpec.SchedulerName'. + // SchedulerName field is from `corev1.PodSpec.SchedulerName`. // +optional SchedulerName string `json:"schedulerName,omitempty"` - // For most time, there is one main container in a pod(frontend/meta/datanode). + // For most time, there is one main container in a pod(`frontend`/`meta`/`datanode`/`flownode`). 
// If specified, additional containers will be added to the pod as sidecar containers. // +optional AdditionalContainers []corev1.Container `json:"additionalContainers,omitempty"` @@ -183,25 +193,25 @@ type MainContainerSpec struct { // Entrypoint array. Not executed within a shell. // The container image's ENTRYPOINT is used if this is not provided. - // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // Variable references `$(VAR_NAME)` are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. Double `$$` are reduced + // to a single `$`, which allows for escaping the `$(VAR_NAME)` syntax: i.e. `$$(VAR_NAME)` will + // produce the string literal `$(VAR_NAME)`. Escaped references will never be expanded, regardless // of whether the variable exists or not. Cannot be updated. - // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - // Command field is from 'corev1.Container.Command'. + // More info: `https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell` + // Command field is from `corev1.Container.Command`. // +optional Command []string `json:"command,omitempty"` // Arguments to the entrypoint. // The container image's CMD is used if this is not provided. - // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. 
Double $$ are reduced - // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // Variable references `$(VAR_NAME)` are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. Double `$$` are reduced + // to a single `$`, which allows for escaping the `$(VAR_NAME)` syntax: i.e. `$$(VAR_NAME)` will + // produce the string literal `$(VAR_NAME)`. Escaped references will never be expanded, regardless // of whether the variable exists or not. Cannot be updated. - // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - // Args field is from 'corev1.Container.Args'. + // More info: `https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell` + // Args field is from `corev1.Container.Args`. // +optional Args []string `json:"args,omitempty"` @@ -209,42 +219,42 @@ type MainContainerSpec struct { // If not specified, the container runtime's default will be used, which // might be configured in the container image. // Cannot be updated. - // WorkingDir field is from 'corev1.Container.WorkingDir'. + // WorkingDir field is from `corev1.Container.WorkingDir`. // +optional WorkingDir string `json:"workingDir,omitempty"` // List of environment variables to set in the container. // Cannot be updated. - // Env field is from 'corev1.Container.Env'. + // Env field is from `corev1.Container.Env`. // +optional Env []corev1.EnvVar `json:"env,omitempty"` // Periodic probe of container liveness. // Container will be restarted if the probe fails. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - // LivenessProbe field is from 'corev1.Container.LivenessProbe'. 
+ // More info: `https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes` + // LivenessProbe field is from `corev1.Container.LivenessProbe`. // +optional LivenessProbe *corev1.Probe `json:"livenessProbe,omitempty"` // Periodic probe of container service readiness. // Container will be removed from service endpoints if the probe fails. - // ReadinessProbe field is from 'corev1.Container.LivenessProbe'. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // ReadinessProbe field is from `corev1.Container.LivenessProbe`. + // More info: `https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes` // +optional ReadinessProbe *corev1.Probe `json:"readinessProbe,omitempty"` // Actions that the management system should take in response to container lifecycle events. // Cannot be updated. - // Lifecycle field is from 'corev1.Container.Lifecycle'. + // Lifecycle field is from `corev1.Container.Lifecycle`. // +optional Lifecycle *corev1.Lifecycle `json:"lifecycle,omitempty"` // Image pull policy. - // One of Always, Never, IfNotPresent. - // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // One of `Always`, `Never`, `IfNotPresent`. + // Defaults to `Always` if `:latest` tag is specified, or IfNotPresent otherwise. // Cannot be updated. - // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images - // ImagePullPolicy field is from 'corev1.Container.ImagePullPolicy'. + // More info: `https://kubernetes.io/docs/concepts/containers/images#updating-images` + // ImagePullPolicy field is from `corev1.Container.ImagePullPolicy`. 
// +optional ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` @@ -254,6 +264,13 @@ type MainContainerSpec struct { VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"` } +func (in *MainContainerSpec) GetImage() string { + if in != nil { + return in.Image + } + return "" +} + // PodTemplateSpec defines the template for a pod of cluster. type PodTemplateSpec struct { // The annotations to be created to the pod. @@ -273,138 +290,327 @@ type PodTemplateSpec struct { SlimPodSpec `json:",inline"` } -// StorageSpec will generate PVC. -type StorageSpec struct { - // The name of the storage. +// FileStorage defines the file storage specification. It is used to generate the PVC that will be mounted to the container. +type FileStorage struct { + // Name is the name of the PVC that will be created. // +optional Name string `json:"name,omitempty"` - // The name of the storage class to use for the volume. + // StorageClassName is the name of the StorageClass to use for the PVC. // +optional StorageClassName *string `json:"storageClassName,omitempty"` - // The size of the storage. + // StorageSize is the size of the storage. // +optional // +kubebuilder:validation:Pattern=(^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$) StorageSize string `json:"storageSize,omitempty"` - // The mount path of the storage in datanode container. + // MountPath is the path where the storage will be mounted in the container. // +optional MountPath string `json:"mountPath,omitempty"` - // The PVCs will retain or delete when the cluster is deleted, default to Retain. + // StorageRetainPolicy is the policy of the storage. It can be `Retain` or `Delete`. // +optional // +kubebuilder:validation:Enum:={"Retain", "Delete"} StorageRetainPolicy StorageRetainPolicyType `json:"storageRetainPolicy,omitempty"` +} - // The wal directory of the storage. - WalDir string `json:"walDir,omitempty"` +// FileStorageAccessor is the interface that wraps the basic methods for the FileStorage. 
+// +kubebuilder:object:generate=false +type FileStorageAccessor interface { + GetName() string + GetStorageClassName() *string + GetSize() string + GetMountPath() string + GetPolicy() StorageRetainPolicyType +} - // The datahome directory. - // +optional - DataHome string `json:"dataHome,omitempty"` +func (in *FileStorage) GetName() string { + if in != nil { + return in.Name + } + return "" +} + +func (in *FileStorage) GetStorageClassName() *string { + if in != nil { + return in.StorageClassName + } + return nil } -// RemoteWalProvider defines the remote wal provider for the cluster. -type RemoteWalProvider struct { +func (in *FileStorage) GetSize() string { + if in != nil { + return in.StorageSize + } + return "" +} + +func (in *FileStorage) GetMountPath() string { + if in != nil { + return in.MountPath + } + return "" +} + +func (in *FileStorage) GetPolicy() StorageRetainPolicyType { + if in != nil { + return in.StorageRetainPolicy + } + return "" +} + +// WALProviderSpec defines the WAL provider for the cluster. +type WALProviderSpec struct { + // RaftEngineWAL is the specification for local WAL that uses raft-engine. // +optional - KafkaRemoteWal *KafkaRemoteWal `json:"kafka,omitempty"` + RaftEngineWAL *RaftEngineWAL `json:"raftEngine,omitempty"` + + // KafkaWAL is the specification for remote WAL that uses Kafka. + // +optional + KafkaWAL *KafkaWAL `json:"kafka,omitempty"` } -// KafkaRemoteWal is the specification for remote WAL that uses Kafka. -type KafkaRemoteWal struct { +// RaftEngineWAL is the specification for local WAL that uses raft-engine. +type RaftEngineWAL struct { + // FileStorage is the file storage configuration for the raft-engine WAL. + // If the file storage is not specified, WAL will use DatanodeStorageSpec. // +optional - BrokerEndpoints []string `json:"brokerEndpoints,omitempty"` + FileStorage *FileStorage `json:"fs,omitempty"` +} + +// KafkaWAL is the specification for Kafka remote WAL. 
+type KafkaWAL struct { + // BrokerEndpoints is the list of Kafka broker endpoints. + // +required + BrokerEndpoints []string `json:"brokerEndpoints"` +} + +func (in *WALProviderSpec) GetRaftEngineWAL() *RaftEngineWAL { + if in != nil { + return in.RaftEngineWAL + } + return nil +} + +func (in *WALProviderSpec) GetKafkaWAL() *KafkaWAL { + if in != nil { + return in.KafkaWAL + } + return nil +} + +func (in *RaftEngineWAL) GetFileStorage() *FileStorage { + if in != nil { + return in.FileStorage + } + return nil } +func (in *KafkaWAL) GetBrokerEndpoints() []string { + if in != nil { + return in.BrokerEndpoints + } + return nil +} + +// ServiceSpec defines the service configuration for the component. type ServiceSpec struct { - // type determines how the Service is exposed. + // Type is the type of the service. // +optional Type corev1.ServiceType `json:"type,omitempty"` - // Additional annotations for the service + // Annotations is the annotations for the service. // +optional Annotations map[string]string `json:"annotations,omitempty"` - // Additional labels for the service + // Labels is the labels for the service. // +optional Labels map[string]string `json:"labels,omitempty"` - // loadBalancerClass is the class of the load balancer implementation this Service belongs to. + // LoadBalancerClass is the class of the load balancer. // +optional LoadBalancerClass *string `json:"loadBalancerClass,omitempty"` } +// TLSSpec defines the TLS configurations for the component. type TLSSpec struct { - // The secret name of the TLS certificate, and it must be in the same namespace of the cluster. - // The secret must contain keys named ca.crt, tls.crt and tls.key. - // +optional - SecretName string `json:"secretName,omitempty"` + // SecretName is the name of the secret that contains the TLS certificates. + // The secret must be in the same namespace with the greptime resource. + // The secret must contain keys named `tls.crt` and `tls.key`. 
+ // +required + SecretName string `json:"secretName"` } -// ObjectStorageProvider defines the storage provider for the cluster. The data will be stored in the storage. -type ObjectStorageProvider struct { - S3 *S3StorageProvider `json:"s3,omitempty"` - OSS *OSSStorageProvider `json:"oss,omitempty"` - GCS *GCSStorageProvider `json:"gcs,omitempty"` - CachePath string `json:"cachePath,omitempty"` - CacheCapacity string `json:"cacheCapacity,omitempty"` +func (in *TLSSpec) GetSecretName() string { + if in != nil { + return in.SecretName + } + return "" } -type S3StorageProvider struct { - // The data will be stored in the bucket. +// ObjectStorageProviderSpec defines the object storage provider for the cluster. The data will be stored in the storage. +type ObjectStorageProviderSpec struct { + // S3 is the S3 storage configuration. // +optional - Bucket string `json:"bucket,omitempty"` + S3 *S3Storage `json:"s3,omitempty"` - // The region of the bucket. + // OSS is the Aliyun OSS storage configuration. // +optional - Region string `json:"region,omitempty"` + OSS *OSSStorage `json:"oss,omitempty"` - // The endpoint of the bucket. + // GCS is the Google GCS storage configuration. // +optional - Endpoint string `json:"endpoint,omitempty"` + GCS *GCSStorage `json:"gcs,omitempty"` - // The secret of storing the credentials of access key id and secret access key. - // The secret must be the same namespace with the GreptimeDBCluster resource. + // Cache is the cache storage configuration for object storage. // +optional - SecretName string `json:"secretName,omitempty"` + Cache *CacheStorage `json:"cache,omitempty"` +} - // The S3 directory path. 
+func (in *ObjectStorageProviderSpec) GetCacheFileStorage() *FileStorage { + if in != nil && in.Cache != nil { + return in.Cache.FileStorage + } + return nil +} + +func (in *ObjectStorageProviderSpec) GetS3Storage() *S3Storage { + if in != nil { + return in.S3 + } + return nil +} + +func (in *ObjectStorageProviderSpec) GetGCSStorage() *GCSStorage { + if in != nil { + return in.GCS + } + return nil +} + +func (in *ObjectStorageProviderSpec) GetOSSStorage() *OSSStorage { + if in != nil { + return in.OSS + } + return nil +} + +func (in *ObjectStorageProviderSpec) getSetObjectStorageCount() int { + count := 0 + if in.S3 != nil { + count++ + } + if in.OSS != nil { + count++ + } + if in.GCS != nil { + count++ + } + return count +} + +// DatanodeStorageSpec defines the storage specification for the datanode. +type DatanodeStorageSpec struct { + // DataHome is the home directory of the data. + DataHome string `json:"dataHome,omitempty"` + + // FileStorage is the file storage configuration. // +optional - Root string `json:"root,omitempty"` + FileStorage *FileStorage `json:"fs,omitempty"` } -type OSSStorageProvider struct { - // The data will be stored in the bucket. +// CacheStorage defines the cache storage specification. +type CacheStorage struct { + // Storage is the storage specification for the cache. + // If the storage is not specified, the cache will use DatanodeStorageSpec. // +optional - Bucket string `json:"bucket,omitempty"` + FileStorage *FileStorage `json:"fs,omitempty"` + + // CacheCapacity is the capacity of the cache. + // +optional + CacheCapacity string `json:"cacheCapacity,omitempty"` +} + +// S3Storage defines the S3 storage specification. +type S3Storage struct { + // The data will be stored in the bucket. + // +required + Bucket string `json:"bucket"` // The region of the bucket. + // +required + Region string `json:"region"` + + // The secret of storing the credentials of access key id and secret access key. 
+ // The secret should contain keys named `access-key-id` and `secret-access-key`. + // The secret must be the same namespace with the GreptimeDBCluster resource. // +optional - Region string `json:"region,omitempty"` + SecretName string `json:"secretName,omitempty"` + + // The S3 directory path. + // +required + Root string `json:"root"` // The endpoint of the bucket. // +optional Endpoint string `json:"endpoint,omitempty"` +} + +func (in *S3Storage) GetSecretName() string { + if in != nil { + return in.SecretName + } + return "" +} + +// OSSStorage defines the Aliyun OSS storage specification. +type OSSStorage struct { + // The data will be stored in the bucket. + // +required + Bucket string `json:"bucket"` + + // The region of the bucket. + // +required + Region string `json:"region"` // The secret of storing the credentials of access key id and secret access key. + // The secret should contain keys named `access-key-id` and `secret-access-key`. // The secret must be the same namespace with the GreptimeDBCluster resource. // +optional SecretName string `json:"secretName,omitempty"` // The OSS directory path. + // +required + Root string `json:"root"` + + // The endpoint of the bucket. // +optional - Root string `json:"root,omitempty"` + Endpoint string `json:"endpoint,omitempty"` +} + +func (in *OSSStorage) GetSecretName() string { + if in != nil { + return in.SecretName + } + return "" } -type GCSStorageProvider struct { +// GCSStorage defines the Google GCS storage specification. +type GCSStorage struct { // The data will be stored in the bucket. - // +optional - Bucket string `json:"bucket,omitempty"` + // +required + Bucket string `json:"bucket"` // The gcs directory path. + // +required + Root string `json:"root"` + + // The secret of storing Credentials for gcs service OAuth2 authentication. + // The secret should contain keys named `service-account-key`. + // The secret must be the same namespace with the GreptimeDBCluster resource. 
// +optional - Root string `json:"root,omitempty"` + SecretName string `json:"secretName,omitempty"` // The scope for gcs. // +optional @@ -413,31 +619,37 @@ type GCSStorageProvider struct { // The endpoint URI of gcs service. // +optional Endpoint string `json:"endpoint,omitempty"` +} - // The secret of storing Credentials for gcs service OAuth2 authentication. - // The secret must be the same namespace with the GreptimeDBCluster resource. - // +optional - SecretName string `json:"secretName,omitempty"` +func (in *GCSStorage) GetSecretName() string { + if in != nil { + return in.SecretName + } + return "" } // PrometheusMonitorSpec defines the PodMonitor configuration. type PrometheusMonitorSpec struct { - // Enable a Prometheus PodMonitor - // +optional - Enabled bool `json:"enabled,omitempty"` + // Enabled indicates whether the PodMonitor is enabled. + // +required + Enabled bool `json:"enabled"` - // Prometheus PodMonitor labels. + // Labels is the labels for the PodMonitor. // +optional - Labels map[string]string `json:"labels,omitempty"` + Labels map[string]string `json:"labels"` - // Interval at which metrics should be scraped + // Interval is the scrape interval for the PodMonitor. // +optional Interval string `json:"interval,omitempty"` } +func (in *PrometheusMonitorSpec) IsEnablePrometheusMonitor() bool { + return in != nil && in.Enabled +} + +// ConditionType is the type of the condition. type ConditionType string -// These are valid conditions of a GreptimeDBCluster and GreptimeDBStandalone. const ( // ConditionTypeReady indicates that the GreptimeDB cluster is ready to serve requests. // Every component in the cluster are all ready. 
diff --git a/apis/v1alpha1/constants.go b/apis/v1alpha1/constants.go new file mode 100644 index 00000000..f1767241 --- /dev/null +++ b/apis/v1alpha1/constants.go @@ -0,0 +1,75 @@ +// Copyright 2024 Greptime Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha1 + +// The following constants are the default values for the GreptimeDBCluster and GreptimeDBStandalone. +const ( + // DefaultVersion is the default version of the GreptimeDB. + DefaultVersion = "Unknown" + + // DefautlHealthEndpoint is the default health endpoint for the liveness probe. + DefautlHealthEndpoint = "/health" + + // DefaultHTTPPort is the default HTTP port for the GreptimeDB. + DefaultHTTPPort int32 = 4000 + + // DefaultRPCPort is the default RPC port for the GreptimeDB. + DefaultRPCPort int32 = 4001 + + // DefaultMySQLPort is the default MySQL port for the GreptimeDB. + DefaultMySQLPort int32 = 4002 + + // DefaultPostgreSQLPort is the default PostgreSQL port for the GreptimeDB. + DefaultPostgreSQLPort int32 = 4003 + + // DefaultMetaRPCPort is the default Meta RPC port for the GreptimeDB. + DefaultMetaRPCPort int32 = 3002 + + // DefaultReplicas is the default number of replicas for components of the GreptimeDB cluster. + DefaultReplicas = 1 + + // DefaultDataSize is the default size of the data when using the file storage. + DefaultDataSize = "10Gi" + + // DefaultDataHome is the default directory for the data. 
+ DefaultDataHome = "/data/greptimedb" + + // DefaultDatanodeFileStorageName is the default file storage name for the datanode. + DefaultDatanodeFileStorageName = "datanode" + + // DefaultStorageRetainPolicyType is the default storage retain policy type. + DefaultStorageRetainPolicyType = StorageRetainPolicyTypeRetain + + // DefaultInitializerImage is the default image for the GreptimeDB initializer. + DefaultInitializerImage = "greptime/greptimedb-initializer:latest" +) + +// The following constants are the constant configuration for the GreptimeDBCluster and GreptimeDBStandalone. +const ( + // TLSCrtSecretKey is the key for the TLS certificate in the secret. + TLSCrtSecretKey = "tls.crt" + + // TLSKeySecretKey is the key for the TLS key in the secret. + TLSKeySecretKey = "tls.key" + + // AccessKeyIDSecretKey is the key for the access key ID in the secret. + AccessKeyIDSecretKey = "access-key-id" + + // SecretAccessKeySecretKey is the key for the secret access key in the secret. + SecretAccessKeySecretKey = "secret-access-key" + + // ServiceAccountKey is the key for the service account in the secret. + ServiceAccountKey = "service-account-key" +) diff --git a/apis/v1alpha1/defaulting.go b/apis/v1alpha1/defaulting.go index aee3ac03..e7110acf 100644 --- a/apis/v1alpha1/defaulting.go +++ b/apis/v1alpha1/defaulting.go @@ -15,42 +15,12 @@ package v1alpha1 import ( - "path" "strings" "dario.cat/mergo" - "google.golang.org/protobuf/proto" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/intstr" -) - -var ( - defaultVersion = "Unknown" - - defautlHealthEndpoint = "/health" - - // The default settings for GreptimeDBClusterSpec. - defaultHTTPPort = 4000 - defaultRPCPort = 4001 - defaultMySQLPort = 4002 - defaultPostgreSQLPort = 4003 - defaultMetaRPCPort = 3002 - - // The default replicas for frontend/meta/datanode. 
- defaultFrontendReplicas int32 = 1 - defaultMetaReplicas int32 = 1 - defaultDatanodeReplicas int32 = 1 - defaultFlownodeReplicas int32 = 1 - - // The default storage settings for datanode. - defaultDataNodeStorageName = "datanode" - defaultStandaloneStorageName = "standalone" - defaultDataNodeStorageSize = "10Gi" - defaultDataNodeStorageMountPath = "/data/greptimedb" - defaultStorageRetainPolicyType = StorageRetainPolicyTypeRetain - defaultWalDir = path.Join(defaultDataNodeStorageMountPath, "wal") - - defaultInitializer = "greptime/greptimedb-initializer:latest" + "k8s.io/utils/pointer" ) func (in *GreptimeDBCluster) SetDefaults() error { @@ -58,8 +28,13 @@ func (in *GreptimeDBCluster) SetDefaults() error { return nil } + // Set the version of the GreptimeDBClusterSpec if it is not set. + if in.GetVersion() == "" && in.GetBaseMainContainer().GetImage() != "" { + in.Spec.Version = getVersionFromImage(in.GetBaseMainContainer().GetImage()) + } + // Merge the default settings into the GreptimeDBClusterSpec. - if err := mergo.Merge(&in.Spec, in.defaultClusterSpec()); err != nil { + if err := mergo.Merge(&in.Spec, in.defaultSpec()); err != nil { return err } @@ -71,153 +46,87 @@ func (in *GreptimeDBCluster) SetDefaults() error { return nil } -func (in *GreptimeDBStandalone) SetDefaults() error { - if in == nil { - return nil - } - - var defaultGreptimeDBStandaloneSpec = &GreptimeDBStandaloneSpec{ +func (in *GreptimeDBCluster) defaultSpec() *GreptimeDBClusterSpec { + var defaultSpec = &GreptimeDBClusterSpec{ Base: &PodTemplateSpec{ MainContainer: &MainContainerSpec{ - // The default liveness probe for the main container of GreptimeDBStandalone. + // The default liveness probe for the main container of GreptimeDBCluster. 
LivenessProbe: &corev1.Probe{ ProbeHandler: corev1.ProbeHandler{ HTTPGet: &corev1.HTTPGetAction{ - Path: defautlHealthEndpoint, - Port: intstr.FromInt(defaultHTTPPort), + Path: DefautlHealthEndpoint, + Port: intstr.FromInt32(DefaultHTTPPort), }, }, }, }, }, - HTTPServicePort: int32(defaultHTTPPort), - RPCPort: int32(defaultRPCPort), - MySQLPort: int32(defaultMySQLPort), - PostgreSQLPort: int32(defaultPostgreSQLPort), - Version: defaultVersion, - LocalStorage: &StorageSpec{ - Name: defaultStandaloneStorageName, - StorageSize: defaultDataNodeStorageSize, - MountPath: defaultDataNodeStorageMountPath, - StorageRetainPolicy: defaultStorageRetainPolicyType, - WalDir: defaultWalDir, - DataHome: defaultDataNodeStorageMountPath, - }, - Service: &ServiceSpec{ - Type: corev1.ServiceTypeClusterIP, - }, + Initializer: &InitializerSpec{Image: DefaultInitializerImage}, + HTTPPort: DefaultHTTPPort, + RPCPort: DefaultRPCPort, + MySQLPort: DefaultMySQLPort, + PostgreSQLPort: DefaultPostgreSQLPort, + Version: DefaultVersion, + Frontend: in.defaultFrontend(), + Meta: in.defaultMeta(), + Datanode: in.defaultDatanode(), } - if in.Spec.Version == "" && - in.Spec.Base != nil && - in.Spec.Base.MainContainer != nil && - in.Spec.Base.MainContainer.Image != "" { - in.Spec.Version = getVersionFromImage(in.Spec.Base.MainContainer.Image) - } - - if err := mergo.Merge(&in.Spec, defaultGreptimeDBStandaloneSpec); err != nil { - return err + if in.GetFlownode() != nil { + defaultSpec.Flownode = in.defaultFlownodeSpec() } - return nil + return defaultSpec } -func (in *GreptimeDBCluster) defaultClusterSpec() *GreptimeDBClusterSpec { - var defaultGreptimeDBClusterSpec = &GreptimeDBClusterSpec{ - Base: &PodTemplateSpec{ - MainContainer: &MainContainerSpec{ - // The default liveness probe for the main container of GreptimeDBCluster. 
- LivenessProbe: &corev1.Probe{ - ProbeHandler: corev1.ProbeHandler{ - HTTPGet: &corev1.HTTPGetAction{ - Path: defautlHealthEndpoint, - Port: intstr.FromInt(defaultHTTPPort), - }, - }, - }, - }, +func (in *GreptimeDBCluster) defaultFrontend() *FrontendSpec { + return &FrontendSpec{ + ComponentSpec: ComponentSpec{ + Template: &PodTemplateSpec{}, + Replicas: pointer.Int32(DefaultReplicas), + }, + Service: &ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, }, - Initializer: &InitializerSpec{Image: defaultInitializer}, - HTTPPort: int32(defaultHTTPPort), - RPCPort: int32(defaultRPCPort), - MySQLPort: int32(defaultMySQLPort), - PostgreSQLPort: int32(defaultPostgreSQLPort), - Version: defaultVersion, - } - - if in.Spec.Version == "" && - in.Spec.Base != nil && - in.Spec.Base.MainContainer != nil && - in.Spec.Base.MainContainer.Image != "" { - in.Spec.Version = getVersionFromImage(in.Spec.Base.MainContainer.Image) } +} - if in.Spec.Frontend != nil { - defaultGreptimeDBClusterSpec.Frontend = &FrontendSpec{ - ComponentSpec: ComponentSpec{ - Template: &PodTemplateSpec{}, - }, - Service: ServiceSpec{ - Type: corev1.ServiceTypeClusterIP, - }, - } - if in.Spec.Frontend.Replicas == nil { - in.Spec.Frontend.Replicas = proto.Int32(defaultFrontendReplicas) - } +func (in *GreptimeDBCluster) defaultMeta() *MetaSpec { + enableRegionFailover := false + if in.GetWALProvider().GetKafkaWAL() != nil { // If remote wal provider is enabled, enable region failover by default. + enableRegionFailover = true } - - if in.Spec.Meta != nil { - enableRegionFailover := false - if in.Spec.RemoteWalProvider != nil { // If remote wal provider is enabled, enable region failover by default. 
- enableRegionFailover = true - } - defaultGreptimeDBClusterSpec.Meta = &MetaSpec{ - ComponentSpec: ComponentSpec{ - Template: &PodTemplateSpec{}, - }, - RPCPort: int32(defaultMetaRPCPort), - HTTPPort: int32(defaultHTTPPort), - EnableRegionFailover: &enableRegionFailover, - } - if in.Spec.Meta.Replicas == nil { - in.Spec.Meta.Replicas = proto.Int32(defaultMetaReplicas) - } + return &MetaSpec{ + ComponentSpec: ComponentSpec{ + Template: &PodTemplateSpec{}, + Replicas: pointer.Int32(DefaultReplicas), + }, + RPCPort: DefaultMetaRPCPort, + HTTPPort: DefaultHTTPPort, + EnableRegionFailover: &enableRegionFailover, } +} - if in.Spec.Datanode != nil { - defaultGreptimeDBClusterSpec.Datanode = &DatanodeSpec{ - ComponentSpec: ComponentSpec{ - Template: &PodTemplateSpec{}, - }, - Storage: StorageSpec{ - Name: defaultDataNodeStorageName, - StorageSize: defaultDataNodeStorageSize, - MountPath: defaultDataNodeStorageMountPath, - StorageRetainPolicy: defaultStorageRetainPolicyType, - WalDir: defaultWalDir, - DataHome: defaultDataNodeStorageMountPath, - }, - RPCPort: int32(defaultRPCPort), - HTTPPort: int32(defaultHTTPPort), - } - if in.Spec.Datanode.Replicas == nil { - in.Spec.Datanode.Replicas = proto.Int32(defaultDatanodeReplicas) - } +func (in *GreptimeDBCluster) defaultDatanode() *DatanodeSpec { + return &DatanodeSpec{ + ComponentSpec: ComponentSpec{ + Template: &PodTemplateSpec{}, + Replicas: pointer.Int32(DefaultReplicas), + }, + RPCPort: DefaultRPCPort, + HTTPPort: DefaultHTTPPort, + Storage: defaultDatanodeStorage(), } +} - if in.Spec.Flownode != nil { - defaultGreptimeDBClusterSpec.Flownode = &FlownodeSpec{ - ComponentSpec: ComponentSpec{ - Template: &PodTemplateSpec{}, - }, - RPCPort: int32(defaultRPCPort), - } - if in.Spec.Flownode.Replicas == nil { - in.Spec.Flownode.Replicas = proto.Int32(defaultFlownodeReplicas) - } +func (in *GreptimeDBCluster) defaultFlownodeSpec() *FlownodeSpec { + return &FlownodeSpec{ + ComponentSpec: ComponentSpec{ + Template: 
&PodTemplateSpec{}, + Replicas: pointer.Int32(DefaultReplicas), + }, + RPCPort: DefaultRPCPort, } - - return defaultGreptimeDBClusterSpec } func (in *GreptimeDBCluster) mergeTemplate() error { @@ -248,7 +157,7 @@ func (in *GreptimeDBCluster) mergeFrontendTemplate() error { } // Reconfigure the probe settings based on the HTTP port. - in.Spec.Frontend.Template.MainContainer.LivenessProbe.HTTPGet.Port = intstr.FromInt(int(in.Spec.HTTPPort)) + in.Spec.Frontend.Template.MainContainer.LivenessProbe.HTTPGet.Port = intstr.FromInt32(in.Spec.HTTPPort) } return nil @@ -262,7 +171,7 @@ func (in *GreptimeDBCluster) mergeMetaTemplate() error { } // Reconfigure the probe settings based on the HTTP port. - in.Spec.Meta.Template.MainContainer.LivenessProbe.HTTPGet.Port = intstr.FromInt(int(in.Spec.Meta.HTTPPort)) + in.Spec.Meta.Template.MainContainer.LivenessProbe.HTTPGet.Port = intstr.FromInt32(in.Spec.Meta.HTTPPort) } return nil @@ -276,7 +185,7 @@ func (in *GreptimeDBCluster) mergeDatanodeTemplate() error { } // Reconfigure the probe settings based on the HTTP port. 
- in.Spec.Datanode.Template.MainContainer.LivenessProbe.HTTPGet.Port = intstr.FromInt(int(in.Spec.Datanode.HTTPPort)) + in.Spec.Datanode.Template.MainContainer.LivenessProbe.HTTPGet.Port = intstr.FromInt32(in.Spec.Datanode.HTTPPort) } return nil @@ -296,6 +205,63 @@ func (in *GreptimeDBCluster) mergeFlownodeTemplate() error { return nil } +func (in *GreptimeDBStandalone) SetDefaults() error { + if in == nil { + return nil + } + + if in.GetVersion() == "" && in.GetBaseMainContainer().GetImage() != "" { + in.Spec.Version = getVersionFromImage(in.GetBaseMainContainer().GetImage()) + } + + if err := mergo.Merge(&in.Spec, in.defaultSpec()); err != nil { + return err + } + + return nil +} + +func (in *GreptimeDBStandalone) defaultSpec() *GreptimeDBStandaloneSpec { + var defaultSpec = &GreptimeDBStandaloneSpec{ + Base: &PodTemplateSpec{ + MainContainer: &MainContainerSpec{ + // The default liveness probe for the main container of GreptimeDBStandalone. + LivenessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: DefautlHealthEndpoint, + Port: intstr.FromInt32(DefaultHTTPPort), + }, + }, + }, + }, + }, + HTTPPort: DefaultHTTPPort, + RPCPort: DefaultRPCPort, + MySQLPort: DefaultMySQLPort, + PostgreSQLPort: DefaultPostgreSQLPort, + Version: DefaultVersion, + Service: &ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + }, + DatanodeStorage: defaultDatanodeStorage(), + } + + return defaultSpec +} + +func defaultDatanodeStorage() *DatanodeStorageSpec { + return &DatanodeStorageSpec{ + DataHome: DefaultDataHome, + FileStorage: &FileStorage{ + Name: DefaultDatanodeFileStorageName, + StorageSize: DefaultDataSize, + MountPath: DefaultDataHome, + StorageRetainPolicy: DefaultStorageRetainPolicyType, + }, + } +} + func getVersionFromImage(imageURL string) string { tokens := strings.Split(imageURL, "/") if len(tokens) > 0 { @@ -305,5 +271,5 @@ func getVersionFromImage(imageURL string) string { return tokens[1] } } - return 
defaultVersion + return DefaultVersion } diff --git a/apis/v1alpha1/defaulting_test.go b/apis/v1alpha1/defaulting_test.go index ab55054a..aae96d47 100644 --- a/apis/v1alpha1/defaulting_test.go +++ b/apis/v1alpha1/defaulting_test.go @@ -83,3 +83,63 @@ func TestClusterSetDefaults(t *testing.T) { } } } + +func TestStandaloneSetDefaults(t *testing.T) { + const ( + testDir = "testdata/greptimedbstandalone" + inputFileName = "input.yaml" + expectFileName = "expect.yaml" + ) + + entries, err := os.ReadDir(testDir) + if err != nil { + t.Fatal(err) + } + + for _, entry := range entries { + if entry.IsDir() { + inputFile := filepath.Join(testDir, entry.Name(), inputFileName) + inputData, err := os.ReadFile(inputFile) + if err != nil { + t.Errorf("failed to read %s: %v", inputFile, err) + } + + expectFile := filepath.Join(testDir, entry.Name(), expectFileName) + expectData, err := os.ReadFile(expectFile) + if err != nil { + t.Fatalf("failed to read %s: %v", expectFile, err) + } + + var ( + input GreptimeDBStandalone + expect GreptimeDBStandalone + ) + if err := yaml.Unmarshal(inputData, &input); err != nil { + t.Fatalf("failed to unmarshal %s: %v", inputFile, err) + } + if err := yaml.Unmarshal(expectData, &expect); err != nil { + t.Fatalf("failed to unmarshal %s: %v", expectFile, err) + } + + if err := input.SetDefaults(); err != nil { + t.Fatalf("failed to set defaults: %v", err) + } + + if !reflect.DeepEqual(input, expect) { + rawInputData, err := yaml.Marshal(input) + if err != nil { + t.Fatalf("failed to marshal: %v", err) + } + + rawExpectData, err := yaml.Marshal(expect) + if err != nil { + t.Fatalf("failed to marshal: %v", err) + } + + // Use diffmatchpatch to get a human-readable diff. 
+ dmp := diffmatchpatch.New() + t.Errorf("unexpected result for %s:\n%s", entry.Name(), dmp.DiffPrettyText(dmp.DiffMain(string(rawExpectData), string(rawInputData), false))) + } + } + } +} diff --git a/apis/v1alpha1/greptimedbcluster_types.go b/apis/v1alpha1/greptimedbcluster_types.go index 7bad75de..e4b9ad6b 100644 --- a/apis/v1alpha1/greptimedbcluster_types.go +++ b/apis/v1alpha1/greptimedbcluster_types.go @@ -18,12 +18,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ComponentSpec is the common specification for all components(frontend/meta/datanode). +// ComponentSpec is the common specification for all components(`frontend`/`meta`/`datanode`/`flownode`). type ComponentSpec struct { // The number of replicas of the components. // +optional // +kubebuilder:validation:Minimum=0 - Replicas *int32 `json:"replicas"` + Replicas *int32 `json:"replicas,omitempty"` // The content of the configuration file of the component in TOML format. // +optional @@ -38,16 +38,17 @@ type ComponentSpec struct { type MetaSpec struct { ComponentSpec `json:",inline"` - // The RPC port of the meta. + // RPCPort is the gRPC port of the meta. // +optional RPCPort int32 `json:"rpcPort,omitempty"` - // The HTTP port of the meta. + // HTTPPort is the HTTP port of the meta. // +optional HTTPPort int32 `json:"httpPort,omitempty"` - // +optional - EtcdEndpoints []string `json:"etcdEndpoints,omitempty"` + // EtcdEndpoints is the endpoints of the etcd cluster. + // +required + EtcdEndpoints []string `json:"etcdEndpoints"` // EnableCheckEtcdService indicates whether to check etcd cluster health when starting meta. // +optional @@ -57,38 +58,110 @@ type MetaSpec struct { // +optional EnableRegionFailover *bool `json:"enableRegionFailover,omitempty"` - // The meta will store data with this key prefix. + // StoreKeyPrefix is the prefix of the key in the etcd. We can use it to isolate the data of different clusters. 
// +optional StoreKeyPrefix string `json:"storeKeyPrefix,omitempty"` } +func (in *MetaSpec) GetConfig() string { + if in != nil { + return in.Config + } + return "" +} + +func (in *MetaSpec) IsEnableRegionFailover() bool { + return in != nil && in.EnableRegionFailover != nil && *in.EnableRegionFailover +} + +func (in *MetaSpec) GetStoreKeyPrefix() string { + if in != nil { + return in.StoreKeyPrefix + } + return "" +} + +func (in *MetaSpec) GetEtcdEndpoints() []string { + if in != nil { + return in.EtcdEndpoints + } + return nil +} + +func (in *MetaSpec) IsEnableCheckEtcdService() bool { + return in != nil && in.EnableCheckEtcdService +} + // FrontendSpec is the specification for frontend component. type FrontendSpec struct { ComponentSpec `json:",inline"` + // Service is the service configuration of the frontend. // +optional - Service ServiceSpec `json:"service,omitempty"` + Service *ServiceSpec `json:"service,omitempty"` - // The TLS configurations of the frontend. + // TLS is the TLS configuration of the frontend. // +optional TLS *TLSSpec `json:"tls,omitempty"` } +func (in *FrontendSpec) GetTLS() *TLSSpec { + if in != nil { + return in.TLS + } + return nil +} + +func (in *FrontendSpec) GetService() *ServiceSpec { + if in != nil { + return in.Service + } + return nil +} + +func (in *FrontendSpec) GetConfig() string { + if in != nil { + return in.Config + } + return "" +} + // DatanodeSpec is the specification for datanode component. type DatanodeSpec struct { ComponentSpec `json:",inline"` - // The RPC port of the datanode. + // RPCPort is the gRPC port of the datanode. // +optional RPCPort int32 `json:"rpcPort,omitempty"` - // The HTTP port of the datanode. + // HTTPPort is the HTTP port of the datanode. // +optional HTTPPort int32 `json:"httpPort,omitempty"` - // Storage is the storage specification for the datanode. + // Storage is the default file storage of the datanode. For example, WAL, cache, index etc. 
// +optional - Storage StorageSpec `json:"storage,omitempty"` + Storage *DatanodeStorageSpec `json:"storage,omitempty"` +} + +func (in *DatanodeSpec) GetConfig() string { + if in != nil { + return in.Config + } + return "" +} + +func (in *DatanodeSpec) GetFileStorage() *FileStorage { + if in != nil && in.Storage != nil { + return in.Storage.FileStorage + } + return nil +} + +func (in *DatanodeSpec) GetDataHome() string { + if in != nil && in.Storage != nil { + return in.Storage.DataHome + } + return "" } // FlownodeSpec is the specification for flownode component. @@ -100,8 +173,16 @@ type FlownodeSpec struct { RPCPort int32 `json:"rpcPort,omitempty"` } +func (in *FlownodeSpec) GetConfig() string { + if in != nil { + return in.Config + } + return "" +} + // InitializerSpec is the init container to set up components configurations before running the container. type InitializerSpec struct { + // The image of the initializer. // +optional Image string `json:"image,omitempty"` } @@ -113,102 +194,208 @@ type GreptimeDBClusterSpec struct { Base *PodTemplateSpec `json:"base,omitempty"` // Frontend is the specification of frontend node. - // +optional + // +required Frontend *FrontendSpec `json:"frontend"` // Meta is the specification of meta node. - // +optional + // +required Meta *MetaSpec `json:"meta"` // Datanode is the specification of datanode node. - // +optional + // +required Datanode *DatanodeSpec `json:"datanode"` // Flownode is the specification of flownode node. // +optional - Flownode *FlownodeSpec `json:"flownode"` + Flownode *FlownodeSpec `json:"flownode,omitempty"` + // HTTPPort is the HTTP port of the greptimedb cluster. // +optional HTTPPort int32 `json:"httpPort,omitempty"` + // RPCPort is the RPC port of the greptimedb cluster. // +optional RPCPort int32 `json:"rpcPort,omitempty"` + // MySQLPort is the MySQL port of the greptimedb cluster. 
// +optional MySQLPort int32 `json:"mysqlPort,omitempty"` + // PostgreSQLPort is the PostgreSQL port of the greptimedb cluster. // +optional PostgreSQLPort int32 `json:"postgreSQLPort,omitempty"` - // +optional - EnableInfluxDBProtocol bool `json:"enableInfluxDBProtocol,omitempty"` - + // PrometheusMonitor is the specification for creating PodMonitor or ServiceMonitor. // +optional PrometheusMonitor *PrometheusMonitorSpec `json:"prometheusMonitor,omitempty"` + // Version is the version of greptimedb. // +optional - // The version of greptimedb. Version string `json:"version,omitempty"` + // Initializer is the init container to set up components configurations before running the container. // +optional Initializer *InitializerSpec `json:"initializer,omitempty"` + // ObjectStorageProvider is the storage provider for the greptimedb cluster. // +optional - ObjectStorageProvider *ObjectStorageProvider `json:"objectStorage,omitempty"` + ObjectStorageProvider *ObjectStorageProviderSpec `json:"objectStorage,omitempty"` + // WALProvider is the WAL provider for the greptimedb cluster. 
// +optional - RemoteWalProvider *RemoteWalProvider `json:"remoteWal,omitempty"` + WALProvider *WALProviderSpec `json:"wal,omitempty"` +} + +func (in *GreptimeDBCluster) GetBaseMainContainer() *MainContainerSpec { + if in != nil && in.Spec.Base != nil { + return in.Spec.Base.MainContainer + } + return nil +} + +func (in *GreptimeDBCluster) GetVersion() string { + if in != nil { + return in.Spec.Version + } + return "" +} + +func (in *GreptimeDBCluster) GetFrontend() *FrontendSpec { + if in != nil { + return in.Spec.Frontend + } + return nil +} + +func (in *GreptimeDBCluster) GetMeta() *MetaSpec { + if in != nil { + return in.Spec.Meta + } + return nil +} + +func (in *GreptimeDBCluster) GetDatanode() *DatanodeSpec { + if in != nil { + return in.Spec.Datanode + } + return nil +} + +func (in *GreptimeDBCluster) GetFlownode() *FlownodeSpec { + return in.Spec.Flownode +} - // More cluster settings can be added here. +func (in *GreptimeDBCluster) GetWALProvider() *WALProviderSpec { + if in != nil { + return in.Spec.WALProvider + } + return nil +} + +func (in *GreptimeDBCluster) GetWALDir() string { + if in == nil { + return "" + } + + if in.Spec.WALProvider != nil && in.Spec.WALProvider.RaftEngineWAL != nil { + return in.Spec.WALProvider.RaftEngineWAL.FileStorage.MountPath + } + + if in.Spec.Datanode != nil && + in.Spec.Datanode.Storage != nil && + in.Spec.Datanode.Storage.DataHome != "" { + return in.Spec.Datanode.Storage.DataHome + "/wal" + } + + return "" +} + +func (in *GreptimeDBCluster) GetObjectStorageProvider() *ObjectStorageProviderSpec { + if in != nil { + return in.Spec.ObjectStorageProvider + } + return nil +} + +func (in *GreptimeDBCluster) GetPrometheusMonitor() *PrometheusMonitorSpec { + if in != nil { + return in.Spec.PrometheusMonitor + } + return nil } // GreptimeDBClusterStatus defines the observed state of GreptimeDBCluster type GreptimeDBClusterStatus struct { + // Frontend is the status of frontend node. 
// +optional Frontend FrontendStatus `json:"frontend,omitempty"` + // Meta is the status of meta node. // +optional Meta MetaStatus `json:"meta,omitempty"` + // Datanode is the status of datanode node. // +optional Datanode DatanodeStatus `json:"datanode,omitempty"` + // Flownode is the status of flownode node. // +optional Flownode FlownodeStatus `json:"flownode,omitempty"` + // Version is the version of greptimedb. // +optional Version string `json:"version,omitempty"` + // ClusterPhase is the phase of the greptimedb cluster. // +optional ClusterPhase Phase `json:"clusterPhase,omitempty"` + // Conditions is an array of current conditions. // +optional Conditions []Condition `json:"conditions,omitempty"` + // ObservedGeneration is the last observed generation. // +optional ObservedGeneration *int64 `json:"observedGeneration,omitempty"` } +// FrontendStatus is the status of frontend node. type FrontendStatus struct { - Replicas int32 `json:"replicas"` + // Replicas is the number of replicas of the frontend. + Replicas int32 `json:"replicas"` + + // ReadyReplicas is the number of ready replicas of the frontend. ReadyReplicas int32 `json:"readyReplicas"` } +// MetaStatus is the status of meta node. type MetaStatus struct { - Replicas int32 `json:"replicas"` + // Replicas is the number of replicas of the meta. + Replicas int32 `json:"replicas"` + + // ReadyReplicas is the number of ready replicas of the meta. ReadyReplicas int32 `json:"readyReplicas"` + // EtcdEndpoints is the endpoints of the etcd cluster. // +optional EtcdEndpoints []string `json:"etcdEndpoints,omitempty"` } +// DatanodeStatus is the status of datanode node. type DatanodeStatus struct { - Replicas int32 `json:"replicas"` + // Replicas is the number of replicas of the datanode. + Replicas int32 `json:"replicas"` + + // ReadyReplicas is the number of ready replicas of the datanode. ReadyReplicas int32 `json:"readyReplicas"` } +// FlownodeStatus is the status of flownode node. 
type FlownodeStatus struct { - Replicas int32 `json:"replicas"` + // Replicas is the number of replicas of the flownode. + Replicas int32 `json:"replicas"` + + // ReadyReplicas is the number of ready replicas of the flownode. ReadyReplicas int32 `json:"readyReplicas"` } @@ -236,7 +423,10 @@ type GreptimeDBCluster struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec GreptimeDBClusterSpec `json:"spec,omitempty"` + // Spec is the specification of the desired state of the GreptimeDBCluster. + Spec GreptimeDBClusterSpec `json:"spec,omitempty"` + + // Status is the most recently observed status of the GreptimeDBCluster. Status GreptimeDBClusterStatus `json:"status,omitempty"` } diff --git a/apis/v1alpha1/greptimedbstandalone_types.go b/apis/v1alpha1/greptimedbstandalone_types.go index aa15751b..cba585d4 100644 --- a/apis/v1alpha1/greptimedbstandalone_types.go +++ b/apis/v1alpha1/greptimedbstandalone_types.go @@ -24,6 +24,7 @@ type GreptimeDBStandaloneSpec struct { // +optional Base *PodTemplateSpec `json:"base,omitempty"` + // Service is the service configuration of greptimedb. // +optional Service *ServiceSpec `json:"service,omitempty"` @@ -31,55 +32,66 @@ type GreptimeDBStandaloneSpec struct { // +optional TLS *TLSSpec `json:"tls,omitempty"` + // HTTPPort is the port of the greptimedb http service. // +optional - HTTPServicePort int32 `json:"httpPort,omitempty"` + HTTPPort int32 `json:"httpPort,omitempty"` + // RPCPort is the port of the greptimedb rpc service. // +optional RPCPort int32 `json:"rpcPort,omitempty"` + // MySQLPort is the port of the greptimedb mysql service. // +optional MySQLPort int32 `json:"mysqlPort,omitempty"` + // PostgreSQLPort is the port of the greptimedb postgresql service. 
// +optional PostgreSQLPort int32 `json:"postgreSQLPort,omitempty"` - // +optional - EnableInfluxDBProtocol bool `json:"enableInfluxDBProtocol,omitempty"` - + // PrometheusMonitor is the specification for creating PodMonitor or ServiceMonitor. // +optional PrometheusMonitor *PrometheusMonitorSpec `json:"prometheusMonitor,omitempty"` + // Version is the version of the greptimedb. // +optional - // The version of greptimedb. Version string `json:"version,omitempty"` + // Initializer is the init container to set up components configurations before running the container. // +optional Initializer *InitializerSpec `json:"initializer,omitempty"` + // ObjectStorageProvider is the storage provider for the greptimedb standalone. // +optional - ObjectStorageProvider *ObjectStorageProvider `json:"objectStorage,omitempty"` + ObjectStorageProvider *ObjectStorageProviderSpec `json:"objectStorage,omitempty"` + // DatanodeStorage is the default file storage of the datanode. For example, WAL, cache, index etc. // +optional - LocalStorage *StorageSpec `json:"localStorage,omitempty"` + DatanodeStorage *DatanodeStorageSpec `json:"datanodeStorage,omitempty"` + // WALProvider is the WAL provider for the greptimedb standalone. // +optional - RemoteWalProvider *RemoteWalProvider `json:"remoteWal,omitempty"` + WALProvider *WALProviderSpec `json:"wal,omitempty"` + // The content of the configuration file of the component in TOML format. // +optional Config string `json:"config,omitempty"` } // GreptimeDBStandaloneStatus defines the observed state of GreptimeDBStandalone type GreptimeDBStandaloneStatus struct { + // Version is the version of the greptimedb. // +optional Version string `json:"version,omitempty"` + // StandalonePhase is the phase of the greptimedb standalone. // +optional StandalonePhase Phase `json:"standalonePhase,omitempty"` + // Conditions represent the latest available observations of an object's current state. 
// +optional Conditions []Condition `json:"conditions,omitempty"` + // ObservedGeneration is the most recent generation observed for this GreptimeDBStandalone. // +optional ObservedGeneration *int64 `json:"observedGeneration,omitempty"` } @@ -96,10 +108,91 @@ type GreptimeDBStandalone struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec GreptimeDBStandaloneSpec `json:"spec,omitempty"` + // Spec is the specification of the desired state of the GreptimeDBStandalone. + Spec GreptimeDBStandaloneSpec `json:"spec,omitempty"` + + // Status is the most recently observed status of the GreptimeDBStandalone. Status GreptimeDBStandaloneStatus `json:"status,omitempty"` } +func (in *GreptimeDBStandalone) GetConfig() string { + if in != nil { + return in.Spec.Config + } + return "" +} + +func (in *GreptimeDBStandalone) GetBaseMainContainer() *MainContainerSpec { + if in != nil && in.Spec.Base != nil { + return in.Spec.Base.MainContainer + } + return nil +} + +func (in *GreptimeDBStandalone) GetVersion() string { + if in != nil { + return in.Spec.Version + } + return "" +} + +func (in *GreptimeDBStandalone) GetPrometheusMonitor() *PrometheusMonitorSpec { + if in != nil { + return in.Spec.PrometheusMonitor + } + return nil +} + +func (in *GreptimeDBStandalone) GetTLS() *TLSSpec { + if in != nil { + return in.Spec.TLS + } + return nil +} + +func (in *GreptimeDBStandalone) GetWALProvider() *WALProviderSpec { + if in != nil { + return in.Spec.WALProvider + } + return nil +} + +func (in *GreptimeDBStandalone) GetObjectStorageProvider() *ObjectStorageProviderSpec { + if in != nil { + return in.Spec.ObjectStorageProvider + } + return nil +} + +func (in *GreptimeDBStandalone) GetWALDir() string { + if in == nil { + return "" + } + + if in.Spec.WALProvider != nil && in.Spec.WALProvider.RaftEngineWAL != nil { + return in.Spec.WALProvider.RaftEngineWAL.FileStorage.MountPath + } + if in.Spec.DatanodeStorage != nil && in.Spec.DatanodeStorage.DataHome 
!= "" { + return in.Spec.DatanodeStorage.DataHome + "/wal" + } + + return "" +} + +func (in *GreptimeDBStandalone) GetDatanodeFileStorage() *FileStorage { + if in != nil && in.Spec.DatanodeStorage != nil { + return in.Spec.DatanodeStorage.FileStorage + } + return nil +} + +func (in *GreptimeDBStandalone) GetDataHome() string { + if in != nil && in.Spec.DatanodeStorage != nil { + return in.Spec.DatanodeStorage.DataHome + } + return "" +} + func (in *GreptimeDBStandaloneStatus) GetCondition(conditionType ConditionType) *Condition { return GetCondition(in.Conditions, conditionType) } diff --git a/apis/v1alpha1/testdata/greptimedbcluster/test00/expect.yaml b/apis/v1alpha1/testdata/greptimedbcluster/test00/expect.yaml index f421e688..36b4c663 100644 --- a/apis/v1alpha1/testdata/greptimedbcluster/test00/expect.yaml +++ b/apis/v1alpha1/testdata/greptimedbcluster/test00/expect.yaml @@ -19,7 +19,7 @@ spec: path: /health port: 4000 frontend: - replicas: 3 + replicas: 1 service: type: ClusterIP template: @@ -30,10 +30,12 @@ spec: path: /health port: 5000 meta: + etcdEndpoints: + - etcd.etcd-cluster.svc.cluster.local:2379 enableRegionFailover: false httpPort: 4000 rpcPort: 3002 - replicas: 3 + replicas: 1 template: main: image: greptime/greptimedb:latest @@ -44,14 +46,14 @@ spec: datanode: httpPort: 4000 rpcPort: 4001 - replicas: 1 + replicas: 3 storage: dataHome: /data/greptimedb - mountPath: /data/greptimedb - name: datanode - storageRetainPolicy: Retain - storageSize: 10Gi - walDir: /data/greptimedb/wal + fs: + name: datanode + mountPath: /data/greptimedb + storageRetainPolicy: Retain + storageSize: 10Gi template: main: image: greptime/greptimedb:latest diff --git a/apis/v1alpha1/testdata/greptimedbcluster/test00/input.yaml b/apis/v1alpha1/testdata/greptimedbcluster/test00/input.yaml index ae4242b3..25638830 100644 --- a/apis/v1alpha1/testdata/greptimedbcluster/test00/input.yaml +++ b/apis/v1alpha1/testdata/greptimedbcluster/test00/input.yaml @@ -8,9 +8,11 @@ spec: main: 
image: greptime/greptimedb:latest frontend: - replicas: 3 + replicas: 1 meta: - replicas: 3 - datanode: + etcdEndpoints: + - etcd.etcd-cluster.svc.cluster.local:2379 replicas: 1 + datanode: + replicas: 3 httpPort: 5000 diff --git a/apis/v1alpha1/testdata/greptimedbcluster/test01/expect.yaml b/apis/v1alpha1/testdata/greptimedbcluster/test01/expect.yaml index fbe356ef..89208a9d 100644 --- a/apis/v1alpha1/testdata/greptimedbcluster/test01/expect.yaml +++ b/apis/v1alpha1/testdata/greptimedbcluster/test01/expect.yaml @@ -49,7 +49,7 @@ spec: meta: enableRegionFailover: false etcdEndpoints: - - etcd.default:2379 + - etcd.etcd-cluster.svc.cluster.local:2379 httpPort: 4000 rpcPort: 3002 replicas: 1 @@ -76,11 +76,11 @@ spec: replicas: 1 storage: dataHome: /data/greptimedb - mountPath: /data/greptimedb - name: datanode - storageRetainPolicy: Retain - storageSize: 10Gi - walDir: /data/greptimedb/wal + fs: + name: datanode + mountPath: /data/greptimedb + storageRetainPolicy: Retain + storageSize: 10Gi template: main: image: greptime/greptimedb:latest diff --git a/apis/v1alpha1/testdata/greptimedbcluster/test01/input.yaml b/apis/v1alpha1/testdata/greptimedbcluster/test01/input.yaml index 24edd4af..fab24c6d 100644 --- a/apis/v1alpha1/testdata/greptimedbcluster/test01/input.yaml +++ b/apis/v1alpha1/testdata/greptimedbcluster/test01/input.yaml @@ -15,6 +15,7 @@ spec: cpu: "1" memory: "1Gi" frontend: + replicas: 1 template: main: image: greptime/greptimedb:latest @@ -22,8 +23,9 @@ spec: - --metasrv-addrs - meta.default:3002 meta: + replicas: 1 etcdEndpoints: - - etcd.default:2379 + - etcd.etcd-cluster.svc.cluster.local:2379 template: main: image: greptime/greptimedb:latest @@ -31,6 +33,7 @@ spec: - --store-addr - etcd.default:2379 datanode: + replicas: 1 template: main: image: greptime/greptimedb:latest diff --git a/apis/v1alpha1/testdata/greptimedbcluster/test02/expect.yaml b/apis/v1alpha1/testdata/greptimedbcluster/test02/expect.yaml index 1a18ef00..90c09ebe 100644 --- 
a/apis/v1alpha1/testdata/greptimedbcluster/test02/expect.yaml +++ b/apis/v1alpha1/testdata/greptimedbcluster/test02/expect.yaml @@ -11,10 +11,17 @@ spec: rpcPort: 4001 mysqlPort: 4002 postgreSQLPort: 4003 - remoteWal: + wal: kafka: brokerEndpoints: - kafka.default:9092 + objectStorage: + s3: + bucket: greptimedb + endpoint: s3.amazonaws.com + region: us-west-2 + root: /greptimedb + secretName: s3-credentials base: main: image: greptime/greptimedb:latest @@ -34,7 +41,9 @@ spec: path: /health port: 4000 meta: - # Should be true if remoteWal.kafka is set. + etcdEndpoints: + - etcd.etcd-cluster.svc.cluster.local:2379 + # Should be true if wal.kafka is set. enableRegionFailover: true httpPort: 4000 rpcPort: 3002 @@ -52,11 +61,11 @@ spec: replicas: 1 storage: dataHome: /data/greptimedb - mountPath: /data/greptimedb - name: datanode - storageRetainPolicy: Retain - storageSize: 10Gi - walDir: /data/greptimedb/wal + fs: + name: datanode + mountPath: /data/greptimedb + storageRetainPolicy: Retain + storageSize: 10Gi template: main: image: greptime/greptimedb:latest diff --git a/apis/v1alpha1/testdata/greptimedbcluster/test02/input.yaml b/apis/v1alpha1/testdata/greptimedbcluster/test02/input.yaml index 42784122..b994f2b3 100644 --- a/apis/v1alpha1/testdata/greptimedbcluster/test02/input.yaml +++ b/apis/v1alpha1/testdata/greptimedbcluster/test02/input.yaml @@ -11,9 +11,18 @@ spec: replicas: 1 meta: replicas: 1 + etcdEndpoints: + - etcd.etcd-cluster.svc.cluster.local:2379 datanode: replicas: 1 - remoteWal: + wal: kafka: brokerEndpoints: - kafka.default:9092 + objectStorage: + s3: + bucket: greptimedb + endpoint: s3.amazonaws.com + region: us-west-2 + root: /greptimedb + secretName: s3-credentials diff --git a/apis/v1alpha1/testdata/greptimedbstandalone/test00/expect.yaml b/apis/v1alpha1/testdata/greptimedbstandalone/test00/expect.yaml new file mode 100644 index 00000000..a34eadfb --- /dev/null +++ b/apis/v1alpha1/testdata/greptimedbstandalone/test00/expect.yaml @@ -0,0 +1,27 @@ 
+apiVersion: greptime.io/v1alpha1 +kind: GreptimeDBStandalone +metadata: + name: test00 + namespace: default +spec: + version: latest + httpPort: 4000 + mysqlPort: 4002 + postgreSQLPort: 4003 + rpcPort: 4001 + base: + main: + image: greptime/greptimedb:latest + livenessProbe: + httpGet: + path: /health + port: 4000 + service: + type: ClusterIP + datanodeStorage: + dataHome: /data/greptimedb + fs: + name: datanode + mountPath: /data/greptimedb + storageRetainPolicy: Retain + storageSize: 10Gi diff --git a/apis/v1alpha1/testdata/greptimedbstandalone/test00/input.yaml b/apis/v1alpha1/testdata/greptimedbstandalone/test00/input.yaml new file mode 100644 index 00000000..ff47d584 --- /dev/null +++ b/apis/v1alpha1/testdata/greptimedbstandalone/test00/input.yaml @@ -0,0 +1,9 @@ +apiVersion: greptime.io/v1alpha1 +kind: GreptimeDBStandalone +metadata: + name: test00 + namespace: default +spec: + base: + main: + image: greptime/greptimedb:latest diff --git a/apis/v1alpha1/zz_generated.deepcopy.go b/apis/v1alpha1/zz_generated.deepcopy.go index 8ba61697..8b11b00e 100644 --- a/apis/v1alpha1/zz_generated.deepcopy.go +++ b/apis/v1alpha1/zz_generated.deepcopy.go @@ -23,6 +23,26 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CacheStorage) DeepCopyInto(out *CacheStorage) { + *out = *in + if in.FileStorage != nil { + in, out := &in.FileStorage, &out.FileStorage + *out = new(FileStorage) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CacheStorage. +func (in *CacheStorage) DeepCopy() *CacheStorage { + if in == nil { + return nil + } + out := new(CacheStorage) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ComponentSpec) DeepCopyInto(out *ComponentSpec) { *out = *in @@ -69,7 +89,11 @@ func (in *Condition) DeepCopy() *Condition { func (in *DatanodeSpec) DeepCopyInto(out *DatanodeSpec) { *out = *in in.ComponentSpec.DeepCopyInto(&out.ComponentSpec) - in.Storage.DeepCopyInto(&out.Storage) + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(DatanodeStorageSpec) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatanodeSpec. @@ -97,6 +121,46 @@ func (in *DatanodeStatus) DeepCopy() *DatanodeStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatanodeStorageSpec) DeepCopyInto(out *DatanodeStorageSpec) { + *out = *in + if in.FileStorage != nil { + in, out := &in.FileStorage, &out.FileStorage + *out = new(FileStorage) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatanodeStorageSpec. +func (in *DatanodeStorageSpec) DeepCopy() *DatanodeStorageSpec { + if in == nil { + return nil + } + out := new(DatanodeStorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FileStorage) DeepCopyInto(out *FileStorage) { + *out = *in + if in.StorageClassName != nil { + in, out := &in.StorageClassName, &out.StorageClassName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileStorage. +func (in *FileStorage) DeepCopy() *FileStorage { + if in == nil { + return nil + } + out := new(FileStorage) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *FlownodeSpec) DeepCopyInto(out *FlownodeSpec) { *out = *in @@ -132,7 +196,11 @@ func (in *FlownodeStatus) DeepCopy() *FlownodeStatus { func (in *FrontendSpec) DeepCopyInto(out *FrontendSpec) { *out = *in in.ComponentSpec.DeepCopyInto(&out.ComponentSpec) - in.Service.DeepCopyInto(&out.Service) + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceSpec) + (*in).DeepCopyInto(*out) + } if in.TLS != nil { in, out := &in.TLS, &out.TLS *out = new(TLSSpec) @@ -166,16 +234,16 @@ func (in *FrontendStatus) DeepCopy() *FrontendStatus { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCSStorageProvider) DeepCopyInto(out *GCSStorageProvider) { +func (in *GCSStorage) DeepCopyInto(out *GCSStorage) { *out = *in } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCSStorageProvider. -func (in *GCSStorageProvider) DeepCopy() *GCSStorageProvider { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCSStorage. 
+func (in *GCSStorage) DeepCopy() *GCSStorage { if in == nil { return nil } - out := new(GCSStorageProvider) + out := new(GCSStorage) in.DeepCopyInto(out) return out } @@ -279,12 +347,12 @@ func (in *GreptimeDBClusterSpec) DeepCopyInto(out *GreptimeDBClusterSpec) { } if in.ObjectStorageProvider != nil { in, out := &in.ObjectStorageProvider, &out.ObjectStorageProvider - *out = new(ObjectStorageProvider) + *out = new(ObjectStorageProviderSpec) (*in).DeepCopyInto(*out) } - if in.RemoteWalProvider != nil { - in, out := &in.RemoteWalProvider, &out.RemoteWalProvider - *out = new(RemoteWalProvider) + if in.WALProvider != nil { + in, out := &in.WALProvider, &out.WALProvider + *out = new(WALProviderSpec) (*in).DeepCopyInto(*out) } } @@ -419,17 +487,17 @@ func (in *GreptimeDBStandaloneSpec) DeepCopyInto(out *GreptimeDBStandaloneSpec) } if in.ObjectStorageProvider != nil { in, out := &in.ObjectStorageProvider, &out.ObjectStorageProvider - *out = new(ObjectStorageProvider) + *out = new(ObjectStorageProviderSpec) (*in).DeepCopyInto(*out) } - if in.LocalStorage != nil { - in, out := &in.LocalStorage, &out.LocalStorage - *out = new(StorageSpec) + if in.DatanodeStorage != nil { + in, out := &in.DatanodeStorage, &out.DatanodeStorage + *out = new(DatanodeStorageSpec) (*in).DeepCopyInto(*out) } - if in.RemoteWalProvider != nil { - in, out := &in.RemoteWalProvider, &out.RemoteWalProvider - *out = new(RemoteWalProvider) + if in.WALProvider != nil { + in, out := &in.WALProvider, &out.WALProvider + *out = new(WALProviderSpec) (*in).DeepCopyInto(*out) } } @@ -487,7 +555,7 @@ func (in *InitializerSpec) DeepCopy() *InitializerSpec { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *KafkaRemoteWal) DeepCopyInto(out *KafkaRemoteWal) { +func (in *KafkaWAL) DeepCopyInto(out *KafkaWAL) { *out = *in if in.BrokerEndpoints != nil { in, out := &in.BrokerEndpoints, &out.BrokerEndpoints @@ -496,12 +564,12 @@ func (in *KafkaRemoteWal) DeepCopyInto(out *KafkaRemoteWal) { } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaRemoteWal. -func (in *KafkaRemoteWal) DeepCopy() *KafkaRemoteWal { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaWAL. +func (in *KafkaWAL) DeepCopy() *KafkaWAL { if in == nil { return nil } - out := new(KafkaRemoteWal) + out := new(KafkaWAL) in.DeepCopyInto(out) return out } @@ -608,46 +676,51 @@ func (in *MetaStatus) DeepCopy() *MetaStatus { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OSSStorageProvider) DeepCopyInto(out *OSSStorageProvider) { +func (in *OSSStorage) DeepCopyInto(out *OSSStorage) { *out = *in } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSSStorageProvider. -func (in *OSSStorageProvider) DeepCopy() *OSSStorageProvider { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSSStorage. +func (in *OSSStorage) DeepCopy() *OSSStorage { if in == nil { return nil } - out := new(OSSStorageProvider) + out := new(OSSStorage) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ObjectStorageProvider) DeepCopyInto(out *ObjectStorageProvider) { +func (in *ObjectStorageProviderSpec) DeepCopyInto(out *ObjectStorageProviderSpec) { *out = *in if in.S3 != nil { in, out := &in.S3, &out.S3 - *out = new(S3StorageProvider) + *out = new(S3Storage) **out = **in } if in.OSS != nil { in, out := &in.OSS, &out.OSS - *out = new(OSSStorageProvider) + *out = new(OSSStorage) **out = **in } if in.GCS != nil { in, out := &in.GCS, &out.GCS - *out = new(GCSStorageProvider) + *out = new(GCSStorage) **out = **in } + if in.Cache != nil { + in, out := &in.Cache, &out.Cache + *out = new(CacheStorage) + (*in).DeepCopyInto(*out) + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageProvider. -func (in *ObjectStorageProvider) DeepCopy() *ObjectStorageProvider { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageProviderSpec. +func (in *ObjectStorageProviderSpec) DeepCopy() *ObjectStorageProviderSpec { if in == nil { return nil } - out := new(ObjectStorageProvider) + out := new(ObjectStorageProviderSpec) in.DeepCopyInto(out) return out } @@ -710,36 +783,36 @@ func (in *PrometheusMonitorSpec) DeepCopy() *PrometheusMonitorSpec { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RemoteWalProvider) DeepCopyInto(out *RemoteWalProvider) { +func (in *RaftEngineWAL) DeepCopyInto(out *RaftEngineWAL) { *out = *in - if in.KafkaRemoteWal != nil { - in, out := &in.KafkaRemoteWal, &out.KafkaRemoteWal - *out = new(KafkaRemoteWal) + if in.FileStorage != nil { + in, out := &in.FileStorage, &out.FileStorage + *out = new(FileStorage) (*in).DeepCopyInto(*out) } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteWalProvider. 
-func (in *RemoteWalProvider) DeepCopy() *RemoteWalProvider { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RaftEngineWAL. +func (in *RaftEngineWAL) DeepCopy() *RaftEngineWAL { if in == nil { return nil } - out := new(RemoteWalProvider) + out := new(RaftEngineWAL) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *S3StorageProvider) DeepCopyInto(out *S3StorageProvider) { +func (in *S3Storage) DeepCopyInto(out *S3Storage) { *out = *in } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3StorageProvider. -func (in *S3StorageProvider) DeepCopy() *S3StorageProvider { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Storage. +func (in *S3Storage) DeepCopy() *S3Storage { if in == nil { return nil } - out := new(S3StorageProvider) + out := new(S3Storage) in.DeepCopyInto(out) return out } @@ -849,36 +922,41 @@ func (in *SlimPodSpec) DeepCopy() *SlimPodSpec { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StorageSpec) DeepCopyInto(out *StorageSpec) { +func (in *TLSSpec) DeepCopyInto(out *TLSSpec) { *out = *in - if in.StorageClassName != nil { - in, out := &in.StorageClassName, &out.StorageClassName - *out = new(string) - **out = **in - } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageSpec. -func (in *StorageSpec) DeepCopy() *StorageSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSSpec. +func (in *TLSSpec) DeepCopy() *TLSSpec { if in == nil { return nil } - out := new(StorageSpec) + out := new(TLSSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *TLSSpec) DeepCopyInto(out *TLSSpec) { +func (in *WALProviderSpec) DeepCopyInto(out *WALProviderSpec) { *out = *in + if in.RaftEngineWAL != nil { + in, out := &in.RaftEngineWAL, &out.RaftEngineWAL + *out = new(RaftEngineWAL) + (*in).DeepCopyInto(*out) + } + if in.KafkaWAL != nil { + in, out := &in.KafkaWAL, &out.KafkaWAL + *out = new(KafkaWAL) + (*in).DeepCopyInto(*out) + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSSpec. -func (in *TLSSpec) DeepCopy() *TLSSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WALProviderSpec. +func (in *WALProviderSpec) DeepCopy() *WALProviderSpec { if in == nil { return nil } - out := new(TLSSpec) + out := new(WALProviderSpec) in.DeepCopyInto(out) return out } diff --git a/config/crd/resources/greptime.io_greptimedbclusters.yaml b/config/crd/resources/greptime.io_greptimedbclusters.yaml index 8a02f52d..f2c21b1b 100644 --- a/config/crd/resources/greptime.io_greptimedbclusters.yaml +++ b/config/crd/resources/greptime.io_greptimedbclusters.yaml @@ -2830,22 +2830,23 @@ spec: properties: dataHome: type: string - mountPath: - type: string - name: - type: string - storageClassName: - type: string - storageRetainPolicy: - enum: - - Retain - - Delete - type: string - storageSize: - pattern: (^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$) - type: string - walDir: - type: string + fs: + properties: + mountPath: + type: string + name: + type: string + storageClassName: + type: string + storageRetainPolicy: + enum: + - Retain + - Delete + type: string + storageSize: + pattern: (^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$) + type: string + type: object type: object template: properties: @@ -5610,8 +5611,6 @@ spec: type: array type: object type: object - enableInfluxDBProtocol: - type: boolean flownode: properties: config: @@ -11175,6 +11174,8 @@ spec: properties: secretName: type: string + required: + - secretName type: object type: object 
httpPort: @@ -13971,16 +13972,36 @@ spec: type: object type: array type: object + required: + - etcdEndpoints type: object mysqlPort: format: int32 type: integer objectStorage: properties: - cacheCapacity: - type: string - cachePath: - type: string + cache: + properties: + cacheCapacity: + type: string + fs: + properties: + mountPath: + type: string + name: + type: string + storageClassName: + type: string + storageRetainPolicy: + enum: + - Retain + - Delete + type: string + storageSize: + pattern: (^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$) + type: string + type: object + type: object gcs: properties: bucket: @@ -13993,6 +14014,9 @@ spec: type: string secretName: type: string + required: + - bucket + - root type: object oss: properties: @@ -14006,6 +14030,10 @@ spec: type: string secretName: type: string + required: + - bucket + - region + - root type: object s3: properties: @@ -14019,6 +14047,10 @@ spec: type: string secretName: type: string + required: + - bucket + - region + - root type: object type: object postgreSQLPort: @@ -14034,8 +14066,15 @@ spec: additionalProperties: type: string type: object + required: + - enabled type: object - remoteWal: + rpcPort: + format: int32 + type: integer + version: + type: string + wal: properties: kafka: properties: @@ -14043,13 +14082,34 @@ spec: items: type: string type: array + required: + - brokerEndpoints + type: object + raftEngine: + properties: + fs: + properties: + mountPath: + type: string + name: + type: string + storageClassName: + type: string + storageRetainPolicy: + enum: + - Retain + - Delete + type: string + storageSize: + pattern: (^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$) + type: string + type: object type: object type: object - rpcPort: - format: int32 - type: integer - version: - type: string + required: + - datanode + - frontend + - meta type: object status: properties: diff --git a/config/crd/resources/greptime.io_greptimedbstandalones.yaml 
b/config/crd/resources/greptime.io_greptimedbstandalones.yaml index e247afca..6d74a4ee 100644 --- a/config/crd/resources/greptime.io_greptimedbstandalones.yaml +++ b/config/crd/resources/greptime.io_greptimedbstandalones.yaml @@ -2802,8 +2802,28 @@ spec: type: object config: type: string - enableInfluxDBProtocol: - type: boolean + datanodeStorage: + properties: + dataHome: + type: string + fs: + properties: + mountPath: + type: string + name: + type: string + storageClassName: + type: string + storageRetainPolicy: + enum: + - Retain + - Delete + type: string + storageSize: + pattern: (^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$) + type: string + type: object + type: object httpPort: format: int32 type: integer @@ -2812,36 +2832,33 @@ spec: image: type: string type: object - localStorage: - properties: - dataHome: - type: string - mountPath: - type: string - name: - type: string - storageClassName: - type: string - storageRetainPolicy: - enum: - - Retain - - Delete - type: string - storageSize: - pattern: (^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$) - type: string - walDir: - type: string - type: object mysqlPort: format: int32 type: integer objectStorage: properties: - cacheCapacity: - type: string - cachePath: - type: string + cache: + properties: + cacheCapacity: + type: string + fs: + properties: + mountPath: + type: string + name: + type: string + storageClassName: + type: string + storageRetainPolicy: + enum: + - Retain + - Delete + type: string + storageSize: + pattern: (^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$) + type: string + type: object + type: object gcs: properties: bucket: @@ -2854,6 +2871,9 @@ spec: type: string secretName: type: string + required: + - bucket + - root type: object oss: properties: @@ -2867,6 +2887,10 @@ spec: type: string secretName: type: string + required: + - bucket + - region + - root type: object s3: properties: @@ -2880,6 +2904,10 @@ spec: type: string secretName: type: string + required: + - bucket + - region + - root 
type: object type: object postgreSQLPort: @@ -2895,16 +2923,8 @@ spec: additionalProperties: type: string type: object - type: object - remoteWal: - properties: - kafka: - properties: - brokerEndpoints: - items: - type: string - type: array - type: object + required: + - enabled type: object rpcPort: format: int32 @@ -2928,9 +2948,43 @@ spec: properties: secretName: type: string + required: + - secretName type: object version: type: string + wal: + properties: + kafka: + properties: + brokerEndpoints: + items: + type: string + type: array + required: + - brokerEndpoints + type: object + raftEngine: + properties: + fs: + properties: + mountPath: + type: string + name: + type: string + storageClassName: + type: string + storageRetainPolicy: + enum: + - Retain + - Delete + type: string + storageSize: + pattern: (^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$) + type: string + type: object + type: object + type: object type: object status: properties: diff --git a/controllers/common/common.go b/controllers/common/common.go index b344e935..f1eccb5e 100644 --- a/controllers/common/common.go +++ b/controllers/common/common.go @@ -17,12 +17,27 @@ package common import ( monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/GreptimeTeam/greptimedb-operator/apis/v1alpha1" "github.com/GreptimeTeam/greptimedb-operator/controllers/constant" ) +var ( + DatanodeFileStorageLabels = map[string]string{ + "app.greptime.io/fileStorageType": "datanode", + } + + WALFileStorageLabels = map[string]string{ + "app.greptime.io/fileStorageType": "wal", + } + + CacheFileStorageLabels = map[string]string{ + "app.greptime.io/fileStorageType": "cache", + } +) + func ResourceName(name string, componentKind v1alpha1.ComponentKind) string { return name + "-" + string(componentKind) } @@ -153,3 +168,23 @@ func GeneratePodTemplateSpec(kind 
v1alpha1.ComponentKind, template *v1alpha1.Pod return spec } + +func FileStorageToPVC(fs v1alpha1.FileStorageAccessor, labels map[string]string) *corev1.PersistentVolumeClaim { + return &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: fs.GetName(), + Labels: labels, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: fs.GetStorageClassName(), + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse(fs.GetSize()), + }, + }, + }, + } +} diff --git a/controllers/greptimedbcluster/controller.go b/controllers/greptimedbcluster/controller.go index 7b4e04f3..6186c83f 100644 --- a/controllers/greptimedbcluster/controller.go +++ b/controllers/greptimedbcluster/controller.go @@ -277,12 +277,12 @@ func (r *Reconciler) validate(ctx context.Context, cluster *v1alpha1.GreptimeDBC return fmt.Errorf("get tls secret '%s' failed, error: '%v'", cluster.Spec.Frontend.TLS.SecretName, err) } - if _, ok := tlsSecret.Data[deployers.TLSCrtSecretKey]; !ok { - return fmt.Errorf("tls secret '%s' does not contain key '%s'", cluster.Spec.Frontend.TLS.SecretName, deployers.TLSCrtSecretKey) + if _, ok := tlsSecret.Data[v1alpha1.TLSCrtSecretKey]; !ok { + return fmt.Errorf("tls secret '%s' does not contain key '%s'", cluster.Spec.Frontend.TLS.SecretName, v1alpha1.TLSCrtSecretKey) } - if _, ok := tlsSecret.Data[deployers.TLSKeySecretKey]; !ok { - return fmt.Errorf("tls secret '%s' does not contain key '%s'", cluster.Spec.Frontend.TLS.SecretName, deployers.TLSKeySecretKey) + if _, ok := tlsSecret.Data[v1alpha1.TLSKeySecretKey]; !ok { + return fmt.Errorf("tls secret '%s' does not contain key '%s'", cluster.Spec.Frontend.TLS.SecretName, v1alpha1.TLSKeySecretKey) } } } @@ -309,9 +309,9 @@ func (r *Reconciler) validate(ctx context.Context, cluster *v1alpha1.GreptimeDBC if err := r.validateTomlConfig(cluster.Spec.Meta.Config); 
err != nil { return fmt.Errorf("invalid meta toml config: %v", err) } - if cluster.Spec.Meta.EnableRegionFailover != nil && *cluster.Spec.Meta.EnableRegionFailover { - if cluster.Spec.RemoteWalProvider == nil { - return fmt.Errorf("remote wal provider must be specified when enable region failover") + if cluster.GetMeta().IsEnableRegionFailover() { + if cluster.GetWALProvider().GetKafkaWAL() == nil { + return fmt.Errorf("meta enable region failover requires kafka WAL") } } } diff --git a/controllers/greptimedbcluster/deployers/datanode.go b/controllers/greptimedbcluster/deployers/datanode.go index 6b850dc5..92abca19 100644 --- a/controllers/greptimedbcluster/deployers/datanode.go +++ b/controllers/greptimedbcluster/deployers/datanode.go @@ -24,7 +24,6 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" @@ -82,11 +81,21 @@ func (d *DatanodeDeployer) CleanUp(ctx context.Context, crdObject client.Object) return err } - if cluster.Spec.Datanode != nil { - if cluster.Spec.Datanode.Storage.StorageRetainPolicy == v1alpha1.StorageRetainPolicyTypeDelete { - if err := d.deleteStorage(ctx, cluster); err != nil { - return err - } + if cluster.GetDatanode().GetFileStorage().GetPolicy() == v1alpha1.StorageRetainPolicyTypeDelete { + if err := d.deleteStorage(ctx, cluster.Namespace, cluster.Name, common.DatanodeFileStorageLabels); err != nil { + return err + } + } + + if cluster.GetWALProvider().GetRaftEngineWAL().GetFileStorage().GetPolicy() == v1alpha1.StorageRetainPolicyTypeDelete { + if err := d.deleteStorage(ctx, cluster.Namespace, cluster.Name, common.WALFileStorageLabels); err != nil { + return err + } + } + + if cluster.GetObjectStorageProvider().GetCacheFileStorage().GetPolicy() == v1alpha1.StorageRetainPolicyTypeDelete { + if err := d.deleteStorage(ctx, cluster.Namespace, 
cluster.Name, common.CacheFileStorageLabels); err != nil { + return err } } @@ -225,21 +234,23 @@ func (d *DatanodeDeployer) requestMetasrvForMaintenance(cluster *v1alpha1.Grepti return nil } -func (d *DatanodeDeployer) deleteStorage(ctx context.Context, cluster *v1alpha1.GreptimeDBCluster) error { +func (d *DatanodeDeployer) deleteStorage(ctx context.Context, namespace, name string, additionalLabels map[string]string) error { klog.Infof("Deleting datanode storage...") + matchedLabels := map[string]string{ + constant.GreptimeDBComponentName: common.ResourceName(name, v1alpha1.DatanodeComponentKind), + } + selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{ - MatchLabels: map[string]string{ - constant.GreptimeDBComponentName: common.ResourceName(cluster.Name, v1alpha1.DatanodeComponentKind), - }, + MatchLabels: util.MergeStringMap(matchedLabels, additionalLabels), }) if err != nil { return err } - pvcList := new(corev1.PersistentVolumeClaimList) + claims := new(corev1.PersistentVolumeClaimList) - err = d.List(ctx, pvcList, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector}) + err = d.List(ctx, claims, client.InNamespace(namespace), client.MatchingLabelsSelector{Selector: selector}) if errors.IsNotFound(err) { return nil } @@ -247,7 +258,7 @@ func (d *DatanodeDeployer) deleteStorage(ctx context.Context, cluster *v1alpha1. 
return err } - for _, pvc := range pvcList.Items { + for _, pvc := range claims.Items { klog.Infof("Deleting datanode PVC: %s", pvc.Name) if err := d.Delete(ctx, &pvc); err != nil { return err @@ -300,9 +311,7 @@ func (d *DatanodeDeployer) isOldPodRestart(new, old appsv1.StatefulSet) bool { } func (d *DatanodeDeployer) shouldUserMaintenanceMode(cluster *v1alpha1.GreptimeDBCluster) bool { - if cluster.Spec.RemoteWalProvider != nil && - cluster.Spec.Meta.EnableRegionFailover != nil && - *cluster.Spec.Meta.EnableRegionFailover { + if cluster.GetWALProvider().GetKafkaWAL() != nil && cluster.GetMeta().IsEnableRegionFailover() { return true } return false @@ -394,7 +403,7 @@ func (b *datanodeBuilder) BuildStatefulSet() deployer.Builder { }, }, Template: b.generatePodTemplateSpec(), - VolumeClaimTemplates: b.generatePVC(), + VolumeClaimTemplates: b.generatePVCs(), }, } @@ -455,7 +464,7 @@ func (b *datanodeBuilder) generatePodTemplateSpec() corev1.PodTemplateSpec { } b.mountConfigDir(podTemplateSpec) - b.addStorageDirMounts(podTemplateSpec) + b.addVolumeMounts(podTemplateSpec) b.addInitConfigDirVolume(podTemplateSpec) podTemplateSpec.Spec.Containers[constant.MainContainerIndex].Ports = b.containerPorts() @@ -467,25 +476,25 @@ func (b *datanodeBuilder) generatePodTemplateSpec() corev1.PodTemplateSpec { return *podTemplateSpec } -func (b *datanodeBuilder) generatePVC() []corev1.PersistentVolumeClaim { - return []corev1.PersistentVolumeClaim{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: b.Cluster.Spec.Datanode.Storage.Name, - }, - Spec: corev1.PersistentVolumeClaimSpec{ - StorageClassName: b.Cluster.Spec.Datanode.Storage.StorageClassName, - AccessModes: []corev1.PersistentVolumeAccessMode{ - corev1.ReadWriteOnce, - }, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceStorage: resource.MustParse(b.Cluster.Spec.Datanode.Storage.StorageSize), - }, - }, - }, - }, +func (b *datanodeBuilder) generatePVCs() []corev1.PersistentVolumeClaim { + 
var claims []corev1.PersistentVolumeClaim + + // It's always not nil because it's the default value. + if fs := b.Cluster.GetDatanode().GetFileStorage(); fs != nil { + claims = append(claims, *common.FileStorageToPVC(fs, common.DatanodeFileStorageLabels)) + } + + // Allocate the standalone WAL storage for the raft-engine. + if fs := b.Cluster.GetWALProvider().GetRaftEngineWAL().GetFileStorage(); fs != nil { + claims = append(claims, *common.FileStorageToPVC(fs, common.WALFileStorageLabels)) + } + + // Allocate the standalone cache file storage for the datanode. + if fs := b.Cluster.GetObjectStorageProvider().GetCacheFileStorage(); fs != nil { + claims = append(claims, *common.FileStorageToPVC(fs, common.CacheFileStorageLabels)) } + + return claims } func (b *datanodeBuilder) generateInitializer() *corev1.Container { @@ -556,15 +565,36 @@ func (b *datanodeBuilder) mountConfigDir(template *corev1.PodTemplateSpec) { ) } -func (b *datanodeBuilder) addStorageDirMounts(template *corev1.PodTemplateSpec) { - // The volume is defined in the PVC. 
- template.Spec.Containers[constant.MainContainerIndex].VolumeMounts = - append(template.Spec.Containers[constant.MainContainerIndex].VolumeMounts, - corev1.VolumeMount{ - Name: b.Cluster.Spec.Datanode.Storage.Name, - MountPath: b.Cluster.Spec.Datanode.Storage.MountPath, - }, - ) +func (b *datanodeBuilder) addVolumeMounts(template *corev1.PodTemplateSpec) { + if fs := b.Cluster.GetDatanode().GetFileStorage(); fs != nil { + template.Spec.Containers[constant.MainContainerIndex].VolumeMounts = + append(template.Spec.Containers[constant.MainContainerIndex].VolumeMounts, + corev1.VolumeMount{ + Name: fs.GetName(), + MountPath: fs.GetMountPath(), + }, + ) + } + + if fs := b.Cluster.GetWALProvider().GetRaftEngineWAL().GetFileStorage(); fs != nil { + template.Spec.Containers[constant.MainContainerIndex].VolumeMounts = + append(template.Spec.Containers[constant.MainContainerIndex].VolumeMounts, + corev1.VolumeMount{ + Name: fs.GetName(), + MountPath: fs.GetMountPath(), + }, + ) + } + + if fs := b.Cluster.GetObjectStorageProvider().GetCacheFileStorage(); fs != nil { + template.Spec.Containers[constant.MainContainerIndex].VolumeMounts = + append(template.Spec.Containers[constant.MainContainerIndex].VolumeMounts, + corev1.VolumeMount{ + Name: fs.GetName(), + MountPath: fs.GetMountPath(), + }, + ) + } } // The init-config volume is used for initializer. 
diff --git a/controllers/greptimedbcluster/deployers/frontend.go b/controllers/greptimedbcluster/deployers/frontend.go index 455da49f..6030f214 100644 --- a/controllers/greptimedbcluster/deployers/frontend.go +++ b/controllers/greptimedbcluster/deployers/frontend.go @@ -36,11 +36,6 @@ import ( k8sutil "github.com/GreptimeTeam/greptimedb-operator/pkg/util/k8s" ) -const ( - TLSCrtSecretKey = "tls.crt" - TLSKeySecretKey = "tls.key" -) - type FrontendDeployer struct { *CommonDeployer } @@ -254,8 +249,8 @@ func (b *frontendBuilder) generateMainContainerArgs() []string { if b.Cluster.Spec.Frontend != nil && b.Cluster.Spec.Frontend.TLS != nil { args = append(args, []string{ "--tls-mode", constant.DefaultTLSMode, - "--tls-cert-path", path.Join(constant.GreptimeDBTLSDir, TLSCrtSecretKey), - "--tls-key-path", path.Join(constant.GreptimeDBTLSDir, TLSKeySecretKey), + "--tls-cert-path", path.Join(constant.GreptimeDBTLSDir, v1alpha1.TLSCrtSecretKey), + "--tls-key-path", path.Join(constant.GreptimeDBTLSDir, v1alpha1.TLSKeySecretKey), }...) 
} diff --git a/controllers/greptimedbstandalone/controller.go b/controllers/greptimedbstandalone/controller.go index 34ca5429..686bb80e 100644 --- a/controllers/greptimedbstandalone/controller.go +++ b/controllers/greptimedbstandalone/controller.go @@ -38,7 +38,6 @@ import ( "github.com/GreptimeTeam/greptimedb-operator/apis/v1alpha1" "github.com/GreptimeTeam/greptimedb-operator/cmd/operator/app/options" - "github.com/GreptimeTeam/greptimedb-operator/controllers/greptimedbcluster/deployers" "github.com/GreptimeTeam/greptimedb-operator/pkg/deployer" ) @@ -254,12 +253,12 @@ func (r *Reconciler) validate(ctx context.Context, standalone *v1alpha1.Greptime return fmt.Errorf("get tls secret '%s' failed, error: '%v'", standalone.Spec.TLS.SecretName, err) } - if _, ok := tlsSecret.Data[deployers.TLSCrtSecretKey]; !ok { - return fmt.Errorf("tls secret '%s' does not contain key '%s'", standalone.Spec.TLS.SecretName, deployers.TLSCrtSecretKey) + if _, ok := tlsSecret.Data[v1alpha1.TLSCrtSecretKey]; !ok { + return fmt.Errorf("tls secret '%s' does not contain key '%s'", standalone.Spec.TLS.SecretName, v1alpha1.TLSCrtSecretKey) } - if _, ok := tlsSecret.Data[deployers.TLSKeySecretKey]; !ok { - return fmt.Errorf("tls secret '%s' does not contain key '%s'", standalone.Spec.TLS.SecretName, deployers.TLSKeySecretKey) + if _, ok := tlsSecret.Data[v1alpha1.TLSKeySecretKey]; !ok { + return fmt.Errorf("tls secret '%s' does not contain key '%s'", standalone.Spec.TLS.SecretName, v1alpha1.TLSKeySecretKey) } } } diff --git a/controllers/greptimedbstandalone/deployer.go b/controllers/greptimedbstandalone/deployer.go index 21e8baae..68efdccc 100644 --- a/controllers/greptimedbstandalone/deployer.go +++ b/controllers/greptimedbstandalone/deployer.go @@ -22,17 +22,16 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" 
"k8s.io/klog/v2" + "k8s.io/utils/pointer" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/GreptimeTeam/greptimedb-operator/apis/v1alpha1" "github.com/GreptimeTeam/greptimedb-operator/controllers/common" "github.com/GreptimeTeam/greptimedb-operator/controllers/constant" - "github.com/GreptimeTeam/greptimedb-operator/controllers/greptimedbcluster/deployers" "github.com/GreptimeTeam/greptimedb-operator/pkg/dbconfig" "github.com/GreptimeTeam/greptimedb-operator/pkg/deployer" "github.com/GreptimeTeam/greptimedb-operator/pkg/util" @@ -59,15 +58,15 @@ func NewStandaloneDeployer(mgr ctrl.Manager) *StandaloneDeployer { } } -func (s *StandaloneDeployer) NewBuilder(crdObject client.Object) deployer.Builder { +func (d *StandaloneDeployer) NewBuilder(crdObject client.Object) deployer.Builder { sb := &standaloneBuilder{ DefaultBuilder: &deployer.DefaultBuilder{ - Scheme: s.Scheme, + Scheme: d.Scheme, Owner: crdObject, }, } - standalone, err := s.getStandalone(crdObject) + standalone, err := d.getStandalone(crdObject) if err != nil { sb.Err = err } @@ -76,8 +75,8 @@ func (s *StandaloneDeployer) NewBuilder(crdObject client.Object) deployer.Builde return sb } -func (s *StandaloneDeployer) Generate(crdObject client.Object) ([]client.Object, error) { - objects, err := s.NewBuilder(crdObject). +func (d *StandaloneDeployer) Generate(crdObject client.Object) ([]client.Object, error) { + objects, err := d.NewBuilder(crdObject). BuildService(). BuildConfigMap(). BuildStatefulSet(). 
@@ -91,14 +90,26 @@ func (s *StandaloneDeployer) Generate(crdObject client.Object) ([]client.Object, return objects, nil } -func (s *StandaloneDeployer) CleanUp(ctx context.Context, crdObject client.Object) error { - cluster, err := s.getStandalone(crdObject) +func (d *StandaloneDeployer) CleanUp(ctx context.Context, crdObject client.Object) error { + standalone, err := d.getStandalone(crdObject) if err != nil { return err } - if cluster.Spec.LocalStorage != nil && cluster.Spec.LocalStorage.StorageRetainPolicy == v1alpha1.StorageRetainPolicyTypeDelete { - if err := s.deleteStorage(ctx, cluster); err != nil { + if standalone.GetDatanodeFileStorage().GetPolicy() == v1alpha1.StorageRetainPolicyTypeDelete { + if err := d.deleteStorage(ctx, standalone.Namespace, standalone.Name, common.DatanodeFileStorageLabels); err != nil { + return err + } + } + + if standalone.GetWALProvider().GetRaftEngineWAL().GetFileStorage().GetPolicy() == v1alpha1.StorageRetainPolicyTypeDelete { + if err := d.deleteStorage(ctx, standalone.Namespace, standalone.Name, common.WALFileStorageLabels); err != nil { + return err + } + } + + if standalone.GetObjectStorageProvider().GetCacheFileStorage().GetPolicy() == v1alpha1.StorageRetainPolicyTypeDelete { + if err := d.deleteStorage(ctx, standalone.Namespace, standalone.Name, common.CacheFileStorageLabels); err != nil { return err } } @@ -106,8 +117,8 @@ func (s *StandaloneDeployer) CleanUp(ctx context.Context, crdObject client.Objec return nil } -func (s *StandaloneDeployer) CheckAndUpdateStatus(ctx context.Context, crdObject client.Object) (bool, error) { - standalone, err := s.getStandalone(crdObject) +func (d *StandaloneDeployer) CheckAndUpdateStatus(ctx context.Context, crdObject client.Object) (bool, error) { + standalone, err := d.getStandalone(crdObject) if err != nil { return false, err } @@ -121,7 +132,7 @@ func (s *StandaloneDeployer) CheckAndUpdateStatus(ctx context.Context, crdObject } ) - err = s.Get(ctx, objectKey, sts) + err = 
d.Get(ctx, objectKey, sts) if errors.IsNotFound(err) { return false, nil } @@ -132,7 +143,7 @@ func (s *StandaloneDeployer) CheckAndUpdateStatus(ctx context.Context, crdObject return k8sutil.IsStatefulSetReady(sts), nil } -func (s *StandaloneDeployer) getStandalone(crdObject client.Object) (*v1alpha1.GreptimeDBStandalone, error) { +func (d *StandaloneDeployer) getStandalone(crdObject client.Object) (*v1alpha1.GreptimeDBStandalone, error) { standalone, ok := crdObject.(*v1alpha1.GreptimeDBStandalone) if !ok { return nil, fmt.Errorf("the object is not a GreptimeDBStandalone") @@ -140,21 +151,27 @@ func (s *StandaloneDeployer) getStandalone(crdObject client.Object) (*v1alpha1.G return standalone, nil } -func (d *StandaloneDeployer) deleteStorage(ctx context.Context, standalone *v1alpha1.GreptimeDBStandalone) error { +func (d *StandaloneDeployer) deleteStorage(ctx context.Context, namespace, name string, additionalLabels map[string]string) error { klog.Infof("Deleting standalone storage...") + matchedLabels := map[string]string{ + constant.GreptimeDBComponentName: common.ResourceName(name, v1alpha1.StandaloneKind), + } + + if additionalLabels != nil { + matchedLabels = util.MergeStringMap(matchedLabels, additionalLabels) + } + selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{ - MatchLabels: map[string]string{ - constant.GreptimeDBComponentName: common.ResourceName(standalone.Name, v1alpha1.StandaloneKind), - }, + MatchLabels: matchedLabels, }) if err != nil { return err } - pvcList := new(corev1.PersistentVolumeClaimList) + claims := new(corev1.PersistentVolumeClaimList) - err = d.List(ctx, pvcList, client.InNamespace(standalone.Namespace), client.MatchingLabelsSelector{Selector: selector}) + err = d.List(ctx, claims, client.InNamespace(namespace), client.MatchingLabelsSelector{Selector: selector}) if errors.IsNotFound(err) { return nil } @@ -162,7 +179,7 @@ func (d *StandaloneDeployer) deleteStorage(ctx context.Context, standalone *v1al return err 
} - for _, pvc := range pvcList.Items { + for _, pvc := range claims.Items { klog.Infof("Deleting standalone PVC: %s", pvc.Name) if err := d.Delete(ctx, &pvc); err != nil { return err @@ -179,9 +196,9 @@ type standaloneBuilder struct { *deployer.DefaultBuilder } -func (s *standaloneBuilder) BuildService() deployer.Builder { - if s.Err != nil { - return s +func (b *standaloneBuilder) BuildService() deployer.Builder { + if b.Err != nil { + return b } svc := &corev1.Service{ @@ -190,128 +207,119 @@ func (s *standaloneBuilder) BuildService() deployer.Builder { APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ - Namespace: s.standalone.Namespace, - Name: common.ResourceName(s.standalone.Name, v1alpha1.StandaloneKind), - Annotations: s.standalone.Spec.Service.Annotations, - Labels: s.standalone.Spec.Service.Labels, + Namespace: b.standalone.Namespace, + Name: common.ResourceName(b.standalone.Name, v1alpha1.StandaloneKind), + Annotations: b.standalone.Spec.Service.Annotations, + Labels: b.standalone.Spec.Service.Labels, }, Spec: corev1.ServiceSpec{ - Type: s.standalone.Spec.Service.Type, + Type: b.standalone.Spec.Service.Type, Selector: map[string]string{ - constant.GreptimeDBComponentName: common.ResourceName(s.standalone.Name, v1alpha1.StandaloneKind), + constant.GreptimeDBComponentName: common.ResourceName(b.standalone.Name, v1alpha1.StandaloneKind), }, - Ports: s.servicePorts(), - LoadBalancerClass: s.standalone.Spec.Service.LoadBalancerClass, + Ports: b.servicePorts(), + LoadBalancerClass: b.standalone.Spec.Service.LoadBalancerClass, }, } - s.Objects = append(s.Objects, svc) + b.Objects = append(b.Objects, svc) - return s + return b } -func (s *standaloneBuilder) BuildConfigMap() deployer.Builder { - if s.Err != nil { - return s +func (b *standaloneBuilder) BuildConfigMap() deployer.Builder { + if b.Err != nil { + return b } - configData, err := dbconfig.FromStandalone(s.standalone) + configData, err := dbconfig.FromStandalone(b.standalone) if err != nil { - s.Err 
= err - return s + b.Err = err + return b } - cm, err := common.GenerateConfigMap(s.standalone.Namespace, s.standalone.Name, v1alpha1.StandaloneKind, configData) + cm, err := common.GenerateConfigMap(b.standalone.Namespace, b.standalone.Name, v1alpha1.StandaloneKind, configData) if err != nil { - s.Err = err - return s + b.Err = err + return b } - s.Objects = append(s.Objects, cm) + b.Objects = append(b.Objects, cm) - return s + return b } -func (s *standaloneBuilder) BuildStatefulSet() deployer.Builder { - if s.Err != nil { - return s +func (b *standaloneBuilder) BuildStatefulSet() deployer.Builder { + if b.Err != nil { + return b } - // Always set replicas to 1 for standalone mode. - replicas := int32(1) - sts := &appsv1.StatefulSet{ TypeMeta: metav1.TypeMeta{ Kind: "StatefulSet", APIVersion: "apps/v1", }, ObjectMeta: metav1.ObjectMeta{ - Name: common.ResourceName(s.standalone.Name, v1alpha1.StandaloneKind), - Namespace: s.standalone.Namespace, + Name: common.ResourceName(b.standalone.Name, v1alpha1.StandaloneKind), + Namespace: b.standalone.Namespace, }, Spec: appsv1.StatefulSetSpec{ - Replicas: &replicas, + // Always set replicas to 1 for standalone mode. 
+ Replicas: pointer.Int32(1), Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ - constant.GreptimeDBComponentName: common.ResourceName(s.standalone.Name, v1alpha1.StandaloneKind), + constant.GreptimeDBComponentName: common.ResourceName(b.standalone.Name, v1alpha1.StandaloneKind), }, }, - Template: s.generatePodTemplateSpec(), - VolumeClaimTemplates: s.generatePVC(), + Template: b.generatePodTemplateSpec(), + VolumeClaimTemplates: b.generatePVCs(), }, } - configData, err := dbconfig.FromStandalone(s.standalone) + configData, err := dbconfig.FromStandalone(b.standalone) if err != nil { - s.Err = err - return s + b.Err = err + return b } sts.Spec.Template.Annotations = util.MergeStringMap(sts.Spec.Template.Annotations, map[string]string{deployer.ConfigHash: util.CalculateConfigHash(configData)}) - s.Objects = append(s.Objects, sts) + b.Objects = append(b.Objects, sts) - return s + return b } -func (s *standaloneBuilder) generatePodTemplateSpec() corev1.PodTemplateSpec { - template := common.GeneratePodTemplateSpec(v1alpha1.StandaloneKind, s.standalone.Spec.Base) +func (b *standaloneBuilder) generatePodTemplateSpec() corev1.PodTemplateSpec { + template := common.GeneratePodTemplateSpec(v1alpha1.StandaloneKind, b.standalone.Spec.Base) - if len(s.standalone.Spec.Base.MainContainer.Args) == 0 { + if len(b.standalone.Spec.Base.MainContainer.Args) == 0 { // Setup main container args. - template.Spec.Containers[constant.MainContainerIndex].Args = s.generateMainContainerArgs() + template.Spec.Containers[constant.MainContainerIndex].Args = b.generateMainContainerArgs() } - // Add Storage Dir. 
- template.Spec.Containers[constant.MainContainerIndex].VolumeMounts = - append(template.Spec.Containers[constant.MainContainerIndex].VolumeMounts, - corev1.VolumeMount{ - Name: s.standalone.Spec.LocalStorage.Name, - MountPath: s.standalone.Spec.LocalStorage.MountPath, - }, - ) + b.addVolumeMounts(template) - template.Spec.Containers[constant.MainContainerIndex].Ports = s.containerPorts() + template.Spec.Containers[constant.MainContainerIndex].Ports = b.containerPorts() template.ObjectMeta.Labels = util.MergeStringMap(template.ObjectMeta.Labels, map[string]string{ - constant.GreptimeDBComponentName: common.ResourceName(s.standalone.Name, v1alpha1.StandaloneKind), + constant.GreptimeDBComponentName: common.ResourceName(b.standalone.Name, v1alpha1.StandaloneKind), }) - common.MountConfigDir(s.standalone.Name, v1alpha1.StandaloneKind, template) + common.MountConfigDir(b.standalone.Name, v1alpha1.StandaloneKind, template) - if s.standalone.Spec.TLS != nil { - s.mountTLSSecret(template) + if b.standalone.Spec.TLS != nil { + b.mountTLSSecret(template) } return *template } -func (s *standaloneBuilder) mountTLSSecret(template *corev1.PodTemplateSpec) { +func (b *standaloneBuilder) mountTLSSecret(template *corev1.PodTemplateSpec) { template.Spec.Volumes = append(template.Spec.Volumes, corev1.Volume{ Name: constant.TLSVolumeName, VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ - SecretName: s.standalone.Spec.TLS.SecretName, + SecretName: b.standalone.Spec.TLS.SecretName, }, }, }) @@ -326,95 +334,127 @@ func (s *standaloneBuilder) mountTLSSecret(template *corev1.PodTemplateSpec) { ) } -func (s *standaloneBuilder) generatePVC() []corev1.PersistentVolumeClaim { - return []corev1.PersistentVolumeClaim{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: s.standalone.Spec.LocalStorage.Name, - }, - Spec: corev1.PersistentVolumeClaimSpec{ - StorageClassName: s.standalone.Spec.LocalStorage.StorageClassName, - AccessModes: []corev1.PersistentVolumeAccessMode{ - 
corev1.ReadWriteOnce, - }, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceStorage: resource.MustParse(s.standalone.Spec.LocalStorage.StorageSize), - }, - }, - }, - }, +func (b *standaloneBuilder) generatePVCs() []corev1.PersistentVolumeClaim { + var claims []corev1.PersistentVolumeClaim + + // It's always not nil because it's the default value. + if fs := b.standalone.GetDatanodeFileStorage(); fs != nil { + claims = append(claims, *common.FileStorageToPVC(fs, common.DatanodeFileStorageLabels)) } + + // Allocate the standalone WAL storage for the raft-engine. + if fs := b.standalone.GetWALProvider().GetRaftEngineWAL().GetFileStorage(); fs != nil { + claims = append(claims, *common.FileStorageToPVC(fs, common.WALFileStorageLabels)) + } + + // Allocate the standalone cache file storage for the datanode. + if fs := b.standalone.GetObjectStorageProvider().GetCacheFileStorage(); fs != nil { + claims = append(claims, *common.FileStorageToPVC(fs, common.CacheFileStorageLabels)) + } + + return claims } -func (s *standaloneBuilder) servicePorts() []corev1.ServicePort { +func (b *standaloneBuilder) servicePorts() []corev1.ServicePort { return []corev1.ServicePort{ { Name: "grpc", Protocol: corev1.ProtocolTCP, - Port: s.standalone.Spec.RPCPort, + Port: b.standalone.Spec.RPCPort, }, { Name: "http", Protocol: corev1.ProtocolTCP, - Port: s.standalone.Spec.HTTPServicePort, + Port: b.standalone.Spec.HTTPPort, }, { Name: "mysql", Protocol: corev1.ProtocolTCP, - Port: s.standalone.Spec.MySQLPort, + Port: b.standalone.Spec.MySQLPort, }, { Name: "postgres", Protocol: corev1.ProtocolTCP, - Port: s.standalone.Spec.PostgreSQLPort, + Port: b.standalone.Spec.PostgreSQLPort, }, } } -func (s *standaloneBuilder) containerPorts() []corev1.ContainerPort { +func (b *standaloneBuilder) containerPorts() []corev1.ContainerPort { return []corev1.ContainerPort{ { Name: "grpc", Protocol: corev1.ProtocolTCP, - ContainerPort: s.standalone.Spec.RPCPort, + 
ContainerPort: b.standalone.Spec.RPCPort, }, { Name: "http", Protocol: corev1.ProtocolTCP, - ContainerPort: s.standalone.Spec.HTTPServicePort, + ContainerPort: b.standalone.Spec.HTTPPort, }, { Name: "mysql", Protocol: corev1.ProtocolTCP, - ContainerPort: s.standalone.Spec.MySQLPort, + ContainerPort: b.standalone.Spec.MySQLPort, }, { Name: "postgres", Protocol: corev1.ProtocolTCP, - ContainerPort: s.standalone.Spec.PostgreSQLPort, + ContainerPort: b.standalone.Spec.PostgreSQLPort, }, } } -func (s *standaloneBuilder) generateMainContainerArgs() []string { +func (b *standaloneBuilder) generateMainContainerArgs() []string { var args = []string{ "standalone", "start", "--data-home", "/data", - "--rpc-addr", fmt.Sprintf("0.0.0.0:%d", s.standalone.Spec.RPCPort), - "--mysql-addr", fmt.Sprintf("0.0.0.0:%d", s.standalone.Spec.MySQLPort), - "--http-addr", fmt.Sprintf("0.0.0.0:%d", s.standalone.Spec.HTTPServicePort), - "--postgres-addr", fmt.Sprintf("0.0.0.0:%d", s.standalone.Spec.PostgreSQLPort), + "--rpc-addr", fmt.Sprintf("0.0.0.0:%d", b.standalone.Spec.RPCPort), + "--mysql-addr", fmt.Sprintf("0.0.0.0:%d", b.standalone.Spec.MySQLPort), + "--http-addr", fmt.Sprintf("0.0.0.0:%d", b.standalone.Spec.HTTPPort), + "--postgres-addr", fmt.Sprintf("0.0.0.0:%d", b.standalone.Spec.PostgreSQLPort), "--config-file", path.Join(constant.GreptimeDBConfigDir, constant.GreptimeDBConfigFileName), } - if s.standalone.Spec.TLS != nil { + if b.standalone.Spec.TLS != nil { args = append(args, []string{ "--tls-mode", "require", - "--tls-cert-path", path.Join(constant.GreptimeDBTLSDir, deployers.TLSCrtSecretKey), - "--tls-key-path", path.Join(constant.GreptimeDBTLSDir, deployers.TLSKeySecretKey), + "--tls-cert-path", path.Join(constant.GreptimeDBTLSDir, v1alpha1.TLSCrtSecretKey), + "--tls-key-path", path.Join(constant.GreptimeDBTLSDir, v1alpha1.TLSKeySecretKey), }...) 
} return args } + +func (b *standaloneBuilder) addVolumeMounts(template *corev1.PodTemplateSpec) { + if fs := b.standalone.GetDatanodeFileStorage(); fs != nil { + template.Spec.Containers[constant.MainContainerIndex].VolumeMounts = + append(template.Spec.Containers[constant.MainContainerIndex].VolumeMounts, + corev1.VolumeMount{ + Name: fs.GetName(), + MountPath: fs.GetMountPath(), + }, + ) + } + + if fs := b.standalone.GetWALProvider().GetRaftEngineWAL().GetFileStorage(); fs != nil { + template.Spec.Containers[constant.MainContainerIndex].VolumeMounts = + append(template.Spec.Containers[constant.MainContainerIndex].VolumeMounts, + corev1.VolumeMount{ + Name: fs.GetName(), + MountPath: fs.GetMountPath(), + }, + ) + } + + if fs := b.standalone.GetObjectStorageProvider().GetCacheFileStorage(); fs != nil { + template.Spec.Containers[constant.MainContainerIndex].VolumeMounts = + append(template.Spec.Containers[constant.MainContainerIndex].VolumeMounts, + corev1.VolumeMount{ + Name: fs.GetName(), + MountPath: fs.GetMountPath(), + }, + ) + } +} diff --git a/docs/api-references/docs.md b/docs/api-references/docs.md index 10b4a952..b2df40f7 100644 --- a/docs/api-references/docs.md +++ b/docs/api-references/docs.md @@ -15,13 +15,30 @@ +#### CacheStorage + + + +CacheStorage defines the cache storage specification. + + + +_Appears in:_ +- [ObjectStorageProviderSpec](#objectstorageproviderspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `fs` _[FileStorage](#filestorage)_ | Storage is the storage specification for the cache.
If the storage is not specified, the cache will use DatanodeStorageSpec. | | | +| `cacheCapacity` _string_ | CacheCapacity is the capacity of the cache. | | | + + #### ComponentSpec -ComponentSpec is the common specification for all components(frontend/meta/datanode). +ComponentSpec is the common specification for all components(`frontend`/`meta`/`datanode`/`flownode`). @@ -63,7 +80,7 @@ _Appears in:_ _Underlying type:_ _string_ - +ConditionType is the type of the condition. @@ -92,16 +109,16 @@ _Appears in:_ | `replicas` _integer_ | The number of replicas of the components. | | Minimum: 0
| | `config` _string_ | The content of the configuration file of the component in TOML format. | | | | `template` _[PodTemplateSpec](#podtemplatespec)_ | Template defines the pod template for the component, if not specified, the pod template will use the default value. | | | -| `rpcPort` _integer_ | The RPC port of the datanode. | | | -| `httpPort` _integer_ | The HTTP port of the datanode. | | | -| `storage` _[StorageSpec](#storagespec)_ | Storage is the storage specification for the datanode. | | | +| `rpcPort` _integer_ | RPCPort is the gRPC port of the datanode. | | | +| `httpPort` _integer_ | HTTPPort is the HTTP port of the datanode. | | | +| `storage` _[DatanodeStorageSpec](#datanodestoragespec)_ | Storage is the default file storage of the datanode. For example, WAL, cache, index etc. | | | #### DatanodeStatus - +DatanodeStatus is the status of datanode node. @@ -110,8 +127,50 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `replicas` _integer_ | | | | -| `readyReplicas` _integer_ | | | | +| `replicas` _integer_ | Replicas is the number of replicas of the datanode. | | | +| `readyReplicas` _integer_ | ReadyReplicas is the number of ready replicas of the datanode. | | | + + +#### DatanodeStorageSpec + + + +DatanodeStorageSpec defines the storage specification for the datanode. + + + +_Appears in:_ +- [DatanodeSpec](#datanodespec) +- [GreptimeDBStandaloneSpec](#greptimedbstandalonespec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `dataHome` _string_ | DataHome is the home directory of the data. | | | +| `fs` _[FileStorage](#filestorage)_ | FileStorage is the file storage configuration. | | | + + +#### FileStorage + + + +FileStorage defines the file storage specification. It is used to generate the PVC that will be mounted to the container. 
+ + + +_Appears in:_ +- [CacheStorage](#cachestorage) +- [DatanodeStorageSpec](#datanodestoragespec) +- [RaftEngineWAL](#raftenginewal) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `name` _string_ | Name is the name of the PVC that will be created. | | | +| `storageClassName` _string_ | StorageClassName is the name of the StorageClass to use for the PVC. | | | +| `storageSize` _string_ | StorageSize is the size of the storage. | | Pattern: `(^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$)`
| +| `mountPath` _string_ | MountPath is the path where the storage will be mounted in the container. | | | +| `storageRetainPolicy` _[StorageRetainPolicyType](#storageretainpolicytype)_ | StorageRetainPolicy is the policy of the storage. It can be `Retain` or `Delete`. | | Enum: [Retain Delete]
| + + #### FlownodeSpec @@ -137,7 +196,7 @@ _Appears in:_ - +FlownodeStatus is the status of flownode node. @@ -146,8 +205,8 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `replicas` _integer_ | | | | -| `readyReplicas` _integer_ | | | | +| `replicas` _integer_ | Replicas is the number of replicas of the flownode. | | | +| `readyReplicas` _integer_ | ReadyReplicas is the number of ready replicas of the flownode. | | | #### FrontendSpec @@ -166,15 +225,15 @@ _Appears in:_ | `replicas` _integer_ | The number of replicas of the components. | | Minimum: 0
| | `config` _string_ | The content of the configuration file of the component in TOML format. | | | | `template` _[PodTemplateSpec](#podtemplatespec)_ | Template defines the pod template for the component, if not specified, the pod template will use the default value. | | | -| `service` _[ServiceSpec](#servicespec)_ | | | | -| `tls` _[TLSSpec](#tlsspec)_ | The TLS configurations of the frontend. | | | +| `service` _[ServiceSpec](#servicespec)_ | Service is the service configuration of the frontend. | | | +| `tls` _[TLSSpec](#tlsspec)_ | TLS is the TLS configuration of the frontend. | | | #### FrontendStatus - +FrontendStatus is the status of frontend node. @@ -183,28 +242,28 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `replicas` _integer_ | | | | -| `readyReplicas` _integer_ | | | | +| `replicas` _integer_ | Replicas is the number of replicas of the frontend. | | | +| `readyReplicas` _integer_ | ReadyReplicas is the number of ready replicas of the frontend. | | | -#### GCSStorageProvider - +#### GCSStorage +GCSStorage defines the Google GCS storage specification. _Appears in:_ -- [ObjectStorageProvider](#objectstorageprovider) +- [ObjectStorageProviderSpec](#objectstorageproviderspec) | Field | Description | Default | Validation | | --- | --- | --- | --- | | `bucket` _string_ | The data will be stored in the bucket. | | | | `root` _string_ | The gcs directory path. | | | +| `secretName` _string_ | The secret of storing Credentials for gcs service OAuth2 authentication.
The secret should contain a key named `service-account-key`.<br />
The secret must be the same namespace with the GreptimeDBCluster resource. | | | | `scope` _string_ | The scope for gcs. | | | | `endpoint` _string_ | The endpoint URI of gcs service. | | | -| `secretName` _string_ | The secret of storing Credentials for gcs service OAuth2 authentication.
The secret must be the same namespace with the GreptimeDBCluster resource. | | | #### GreptimeDBCluster @@ -223,7 +282,7 @@ _Appears in:_ | `apiVersion` _string_ | `greptime.io/v1alpha1` | | | | `kind` _string_ | `GreptimeDBCluster` | | | | `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | -| `spec` _[GreptimeDBClusterSpec](#greptimedbclusterspec)_ | | | | +| `spec` _[GreptimeDBClusterSpec](#greptimedbclusterspec)_ | Spec is the specification of the desired state of the GreptimeDBCluster. | | | #### GreptimeDBClusterList @@ -262,16 +321,15 @@ _Appears in:_ | `meta` _[MetaSpec](#metaspec)_ | Meta is the specification of meta node. | | | | `datanode` _[DatanodeSpec](#datanodespec)_ | Datanode is the specification of datanode node. | | | | `flownode` _[FlownodeSpec](#flownodespec)_ | Flownode is the specification of flownode node. | | | -| `httpPort` _integer_ | | | | -| `rpcPort` _integer_ | | | | -| `mysqlPort` _integer_ | | | | -| `postgreSQLPort` _integer_ | | | | -| `enableInfluxDBProtocol` _boolean_ | | | | -| `prometheusMonitor` _[PrometheusMonitorSpec](#prometheusmonitorspec)_ | | | | -| `version` _string_ | The version of greptimedb. | | | -| `initializer` _[InitializerSpec](#initializerspec)_ | | | | -| `objectStorage` _[ObjectStorageProvider](#objectstorageprovider)_ | | | | -| `remoteWal` _[RemoteWalProvider](#remotewalprovider)_ | | | | +| `httpPort` _integer_ | HTTPPort is the HTTP port of the greptimedb cluster. | | | +| `rpcPort` _integer_ | RPCPort is the RPC port of the greptimedb cluster. | | | +| `mysqlPort` _integer_ | MySQLPort is the MySQL port of the greptimedb cluster. | | | +| `postgreSQLPort` _integer_ | PostgreSQLPort is the PostgreSQL port of the greptimedb cluster. 
| | | +| `prometheusMonitor` _[PrometheusMonitorSpec](#prometheusmonitorspec)_ | PrometheusMonitor is the specification for creating PodMonitor or ServiceMonitor. | | | +| `version` _string_ | Version is the version of greptimedb. | | | +| `initializer` _[InitializerSpec](#initializerspec)_ | Initializer is the init container to set up components configurations before running the container. | | | +| `objectStorage` _[ObjectStorageProviderSpec](#objectstorageproviderspec)_ | ObjectStorageProvider is the storage provider for the greptimedb cluster. | | | +| `wal` _[WALProviderSpec](#walproviderspec)_ | WALProvider is the WAL provider for the greptimedb cluster. | | | @@ -292,7 +350,7 @@ _Appears in:_ | `apiVersion` _string_ | `greptime.io/v1alpha1` | | | | `kind` _string_ | `GreptimeDBStandalone` | | | | `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | -| `spec` _[GreptimeDBStandaloneSpec](#greptimedbstandalonespec)_ | | | | +| `spec` _[GreptimeDBStandaloneSpec](#greptimedbstandalonespec)_ | Spec is the specification of the desired state of the GreptimeDBStandalone. | | | #### GreptimeDBStandaloneList @@ -327,20 +385,19 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | | `base` _[PodTemplateSpec](#podtemplatespec)_ | Base is the base pod template for all components and can be overridden by template of individual component. | | | -| `service` _[ServiceSpec](#servicespec)_ | | | | +| `service` _[ServiceSpec](#servicespec)_ | Service is the service configuration of greptimedb. | | | | `tls` _[TLSSpec](#tlsspec)_ | The TLS configurations of the greptimedb. 
| | | -| `httpPort` _integer_ | | | | -| `rpcPort` _integer_ | | | | -| `mysqlPort` _integer_ | | | | -| `postgreSQLPort` _integer_ | | | | -| `enableInfluxDBProtocol` _boolean_ | | | | -| `prometheusMonitor` _[PrometheusMonitorSpec](#prometheusmonitorspec)_ | | | | -| `version` _string_ | The version of greptimedb. | | | -| `initializer` _[InitializerSpec](#initializerspec)_ | | | | -| `objectStorage` _[ObjectStorageProvider](#objectstorageprovider)_ | | | | -| `localStorage` _[StorageSpec](#storagespec)_ | | | | -| `remoteWal` _[RemoteWalProvider](#remotewalprovider)_ | | | | -| `config` _string_ | | | | +| `httpPort` _integer_ | HTTPPort is the port of the greptimedb http service. | | | +| `rpcPort` _integer_ | RPCPort is the port of the greptimedb rpc service. | | | +| `mysqlPort` _integer_ | MySQLPort is the port of the greptimedb mysql service. | | | +| `postgreSQLPort` _integer_ | PostgreSQLPort is the port of the greptimedb postgresql service. | | | +| `prometheusMonitor` _[PrometheusMonitorSpec](#prometheusmonitorspec)_ | PrometheusMonitor is the specification for creating PodMonitor or ServiceMonitor. | | | +| `version` _string_ | Version is the version of the greptimedb. | | | +| `initializer` _[InitializerSpec](#initializerspec)_ | Initializer is the init container to set up components configurations before running the container. | | | +| `objectStorage` _[ObjectStorageProviderSpec](#objectstorageproviderspec)_ | ObjectStorageProvider is the storage provider for the greptimedb cluster. | | | +| `datanodeStorage` _[DatanodeStorageSpec](#datanodestoragespec)_ | DatanodeStorage is the default file storage of the datanode. For example, WAL, cache, index etc. | | | +| `wal` _[WALProviderSpec](#walproviderspec)_ | WALProvider is the WAL provider for the greptimedb cluster. | | | +| `config` _string_ | The content of the configuration file of the component in TOML format. 
| | | @@ -359,23 +416,23 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `image` _string_ | | | | +| `image` _string_ | The image of the initializer. | | | -#### KafkaRemoteWal +#### KafkaWAL -KafkaRemoteWal is the specification for remote WAL that uses Kafka. +KafkaWAL is the specification for Kafka remote WAL. _Appears in:_ -- [RemoteWalProvider](#remotewalprovider) +- [WALProviderSpec](#walproviderspec) | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `brokerEndpoints` _string array_ | | | | +| `brokerEndpoints` _string array_ | BrokerEndpoints is the list of Kafka broker endpoints. | | | #### MainContainerSpec @@ -394,14 +451,14 @@ _Appears in:_ | --- | --- | --- | --- | | `image` _string_ | The main container image name of the component. | | | | `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#resourcerequirements-v1-core)_ | The resource requirements of the main container. | | | -| `command` _string array_ | Entrypoint array. Not executed within a shell.
The container image's ENTRYPOINT is used if this is not provided.
Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
of whether the variable exists or not. Cannot be updated.
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
Command field is from 'corev1.Container.Command'. | | | -| `args` _string array_ | Arguments to the entrypoint.
The container image's CMD is used if this is not provided.
Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
of whether the variable exists or not. Cannot be updated.
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
Args field is from 'corev1.Container.Args'. | | | -| `workingDir` _string_ | Container's working directory.
If not specified, the container runtime's default will be used, which
might be configured in the container image.
Cannot be updated.
WorkingDir field is from 'corev1.Container.WorkingDir'. | | | -| `env` _[EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#envvar-v1-core) array_ | List of environment variables to set in the container.
Cannot be updated.
Env field is from 'corev1.Container.Env'. | | | -| `livenessProbe` _[Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#probe-v1-core)_ | Periodic probe of container liveness.
Container will be restarted if the probe fails.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
LivenessProbe field is from 'corev1.Container.LivenessProbe'. | | | -| `readinessProbe` _[Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#probe-v1-core)_ | Periodic probe of container service readiness.
Container will be removed from service endpoints if the probe fails.
ReadinessProbe field is from 'corev1.Container.LivenessProbe'.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes | | | -| `lifecycle` _[Lifecycle](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#lifecycle-v1-core)_ | Actions that the management system should take in response to container lifecycle events.
Cannot be updated.
Lifecycle field is from 'corev1.Container.Lifecycle'. | | | -| `imagePullPolicy` _[PullPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#pullpolicy-v1-core)_ | Image pull policy.
One of Always, Never, IfNotPresent.
Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
Cannot be updated.
More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
ImagePullPolicy field is from 'corev1.Container.ImagePullPolicy'. | | | +| `command` _string array_ | Entrypoint array. Not executed within a shell.
The container image's ENTRYPOINT is used if this is not provided.
Variable references `$(VAR_NAME)` are expanded using the container's environment. If a variable
cannot be resolved, the reference in the input string will be unchanged. Double `$$` are reduced
to a single `$`, which allows for escaping the `$(VAR_NAME)` syntax: i.e. `$$(VAR_NAME)` will
produce the string literal `$(VAR_NAME)`. Escaped references will never be expanded, regardless
of whether the variable exists or not. Cannot be updated.
More info: `https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell`
Command field is from `corev1.Container.Command`. | | | +| `args` _string array_ | Arguments to the entrypoint.
The container image's CMD is used if this is not provided.
Variable references `$(VAR_NAME)` are expanded using the container's environment. If a variable
cannot be resolved, the reference in the input string will be unchanged. Double `$$` are reduced
to a single `$`, which allows for escaping the `$(VAR_NAME)` syntax: i.e. `$$(VAR_NAME)` will
produce the string literal `$(VAR_NAME)`. Escaped references will never be expanded, regardless
of whether the variable exists or not. Cannot be updated.
More info: `https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell`
Args field is from `corev1.Container.Args`. | | | +| `workingDir` _string_ | Container's working directory.
If not specified, the container runtime's default will be used, which
might be configured in the container image.
Cannot be updated.
WorkingDir field is from `corev1.Container.WorkingDir`. | | | +| `env` _[EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#envvar-v1-core) array_ | List of environment variables to set in the container.
Cannot be updated.
Env field is from `corev1.Container.Env`. | | | +| `livenessProbe` _[Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#probe-v1-core)_ | Periodic probe of container liveness.
Container will be restarted if the probe fails.
More info: `https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes`
LivenessProbe field is from `corev1.Container.LivenessProbe`. | | | +| `readinessProbe` _[Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#probe-v1-core)_ | Periodic probe of container service readiness.
Container will be removed from service endpoints if the probe fails.
ReadinessProbe field is from `corev1.Container.ReadinessProbe`.
More info: `https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes` | | | +| `lifecycle` _[Lifecycle](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#lifecycle-v1-core)_ | Actions that the management system should take in response to container lifecycle events.
Cannot be updated.
Lifecycle field is from `corev1.Container.Lifecycle`. | | | +| `imagePullPolicy` _[PullPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#pullpolicy-v1-core)_ | Image pull policy.
One of `Always`, `Never`, `IfNotPresent`.
Defaults to `Always` if `:latest` tag is specified, or `IfNotPresent` otherwise.
Cannot be updated.
More info: `https://kubernetes.io/docs/concepts/containers/images#updating-images`
ImagePullPolicy field is from `corev1.Container.ImagePullPolicy`. | | | | `volumeMounts` _[VolumeMount](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#volumemount-v1-core) array_ | Pod volumes to mount into the container's filesystem.
Cannot be updated. | | | @@ -421,19 +478,19 @@ _Appears in:_ | `replicas` _integer_ | The number of replicas of the components. | | Minimum: 0
| | `config` _string_ | The content of the configuration file of the component in TOML format. | | | | `template` _[PodTemplateSpec](#podtemplatespec)_ | Template defines the pod template for the component, if not specified, the pod template will use the default value. | | | -| `rpcPort` _integer_ | The RPC port of the meta. | | | -| `httpPort` _integer_ | The HTTP port of the meta. | | | -| `etcdEndpoints` _string array_ | | | | +| `rpcPort` _integer_ | RPCPort is the gRPC port of the meta. | | | +| `httpPort` _integer_ | HTTPPort is the HTTP port of the meta. | | | +| `etcdEndpoints` _string array_ | EtcdEndpoints is the endpoints of the etcd cluster. | | | | `enableCheckEtcdService` _boolean_ | EnableCheckEtcdService indicates whether to check etcd cluster health when starting meta. | | | | `enableRegionFailover` _boolean_ | EnableRegionFailover indicates whether to enable region failover. | | | -| `storeKeyPrefix` _string_ | The meta will store data with this key prefix. | | | +| `storeKeyPrefix` _string_ | StoreKeyPrefix is the prefix of the key in the etcd. We can use it to isolate the data of different clusters. | | | #### MetaStatus - +MetaStatus is the status of meta node. @@ -442,36 +499,36 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `replicas` _integer_ | | | | -| `readyReplicas` _integer_ | | | | -| `etcdEndpoints` _string array_ | | | | - +| `replicas` _integer_ | Replicas is the number of replicas of the meta. | | | +| `readyReplicas` _integer_ | ReadyReplicas is the number of ready replicas of the meta. | | | +| `etcdEndpoints` _string array_ | EtcdEndpoints is the endpoints of the etcd cluster. | | | -#### OSSStorageProvider +#### OSSStorage +OSSStorage defines the Aliyun OSS storage specification. 
_Appears in:_ -- [ObjectStorageProvider](#objectstorageprovider) +- [ObjectStorageProviderSpec](#objectstorageproviderspec) | Field | Description | Default | Validation | | --- | --- | --- | --- | | `bucket` _string_ | The data will be stored in the bucket. | | | | `region` _string_ | The region of the bucket. | | | -| `endpoint` _string_ | The endpoint of the bucket. | | | -| `secretName` _string_ | The secret of storing the credentials of access key id and secret access key.
The secret must be the same namespace with the GreptimeDBCluster resource. | | | +| `secretName` _string_ | The secret of storing the credentials of access key id and secret access key.
The secret should contain keys named `access-key-id` and `secret-access-key`.
The secret must be the same namespace with the GreptimeDBCluster resource. | | | | `root` _string_ | The OSS directory path. | | | +| `endpoint` _string_ | The endpoint of the bucket. | | | -#### ObjectStorageProvider +#### ObjectStorageProviderSpec -ObjectStorageProvider defines the storage provider for the cluster. The data will be stored in the storage. +ObjectStorageProviderSpec defines the object storage provider for the cluster. The data will be stored in the storage. @@ -481,11 +538,10 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `s3` _[S3StorageProvider](#s3storageprovider)_ | | | | -| `oss` _[OSSStorageProvider](#ossstorageprovider)_ | | | | -| `gcs` _[GCSStorageProvider](#gcsstorageprovider)_ | | | | -| `cachePath` _string_ | | | | -| `cacheCapacity` _string_ | | | | +| `s3` _[S3Storage](#s3storage)_ | S3 is the S3 storage configuration. | | | +| `oss` _[OSSStorage](#ossstorage)_ | OSS is the Aliyun OSS storage configuration. | | | +| `gcs` _[GCSStorage](#gcsstorage)_ | GCS is the Google GCS storage configuration. | | | +| `cache` _[CacheStorage](#cachestorage)_ | Cache is the cache storage configuration for object storage. | | | #### Phase @@ -502,11 +558,11 @@ _Appears in:_ | Field | Description | | --- | --- | -| `Starting` | PhaseStarting means the controller start to create cluster.
| -| `Running` | PhaseRunning means all the components of cluster is ready.
| -| `Updating` | PhaseUpdating means the cluster is updating.
| +| `Starting` | PhaseStarting means the controller start to create cluster or standalone.
| +| `Running` | PhaseRunning means all the components of cluster or standalone is ready.
| +| `Updating` | PhaseUpdating means the cluster or standalone is updating.
| | `Error` | PhaseError means some kind of error happen in reconcile.
| -| `Terminating` | PhaseTerminating means the cluster is terminating.
| +| `Terminating` | PhaseTerminating means the cluster or standalone is terminating.
| #### PodTemplateSpec @@ -531,19 +587,19 @@ _Appears in:_ | `annotations` _object (keys:string, values:string)_ | The annotations to be created to the pod. | | | | `labels` _object (keys:string, values:string)_ | The labels to be created to the pod. | | | | `main` _[MainContainerSpec](#maincontainerspec)_ | MainContainer defines the specification of the main container of the pod. | | | -| `nodeSelector` _object (keys:string, values:string)_ | NodeSelector is a selector which must be true for the pod to fit on a node.
Selector which must match a node's labels for the pod to be scheduled on that node.
More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
NodeSelector field is from 'corev1.PodSpec.NodeSelector'. | | | -| `initContainers` _[Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#container-v1-core) array_ | List of initialization containers belonging to the pod.
Init containers are executed in order prior to containers being started. If any
init container fails, the pod is considered to have failed and is handled according
to its restartPolicy. The name for an init container or normal container must be
unique among all containers.
Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
The resourceRequirements of an init container are taken into account during scheduling
by finding the highest request/limit for each resource type, and then using the max of
that value or the sum of the normal containers. Limits are applied to init containers
in a similar fashion.
Init containers cannot currently be added or removed.
Cannot be updated.
More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
InitContainers field is from 'corev1.PodSpec.InitContainers'. | | | -| `restartPolicy` _[RestartPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#restartpolicy-v1-core)_ | Restart policy for all containers within the pod.
One of Always, OnFailure, Never.
Default to Always.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
RestartPolicy field is from 'corev1.PodSpec.RestartPolicy'. | | | -| `terminationGracePeriodSeconds` _integer_ | Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
Value must be non-negative integer. The value zero indicates stop immediately via
the kill signal (no opportunity to shut down).
If this value is nil, the default grace period will be used instead.
The grace period is the duration in seconds after the processes running in the pod are sent
a termination signal and the time when the processes are forcibly halted with a kill signal.
Set this value longer than the expected cleanup time for your process.
Defaults to 30 seconds.
TerminationGracePeriodSeconds field is from 'corev1.PodSpec.TerminationGracePeriodSeconds'. | | | -| `activeDeadlineSeconds` _integer_ | Optional duration in seconds the pod may be active on the node relative to
StartTime before the system will actively try to mark it failed and kill associated containers.
Value must be a positive integer.
ActiveDeadlineSeconds field is from 'corev1.PodSpec.ActiveDeadlineSeconds'. | | | -| `dnsPolicy` _[DNSPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#dnspolicy-v1-core)_ | Set DNS policy for the pod.
Defaults to "ClusterFirst".
Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
To have DNS options set along with hostNetwork, you have to specify DNS policy
explicitly to 'ClusterFirstWithHostNet'.
DNSPolicy field is from 'corev1.PodSpec.DNSPolicy'. | | | -| `serviceAccountName` _string_ | ServiceAccountName is the name of the ServiceAccount to use to run this pod.
More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
ServiceAccountName field is from 'corev1.PodSpec.ServiceAccountName'. | | | -| `hostNetwork` _boolean_ | Host networking requested for this pod. Use the host's network namespace.
If this option is set, the ports that will be used must be specified.
Default to false.
HostNetwork field is from 'corev1.PodSpec.HostNetwork'. | | | -| `imagePullSecrets` _[LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#localobjectreference-v1-core) array_ | ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
If specified, these secrets will be passed to individual puller implementations for them to use.
More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
ImagePullSecrets field is from 'corev1.PodSpec.ImagePullSecrets'. | | | -| `affinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#affinity-v1-core)_ | If specified, the pod's scheduling constraints
Affinity field is from 'corev1.PodSpec.Affinity'. | | | +| `nodeSelector` _object (keys:string, values:string)_ | NodeSelector is a selector which must be true for the pod to fit on a node.
Selector which must match a node's labels for the pod to be scheduled on that node.
More info: `https://kubernetes.io/docs/concepts/configuration/assign-pod-node/`
NodeSelector field is from `corev1.PodSpec.NodeSelector`. | | | +| `initContainers` _[Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#container-v1-core) array_ | List of initialization containers belonging to the pod.
Init containers are executed in order prior to containers being started. If any
init container fails, the pod is considered to have failed and is handled according
to its restartPolicy. The name for an init container or normal container must be
unique among all containers.
Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
The resourceRequirements of an init container are taken into account during scheduling
by finding the highest request/limit for each resource type, and then using the max of
that value or the sum of the normal containers. Limits are applied to init containers
in a similar fashion.
Init containers cannot currently be added or removed.
Cannot be updated.
More info: `https://kubernetes.io/docs/concepts/workloads/pods/init-containers/`
InitContainers field is from `corev1.PodSpec.InitContainers`. | | | +| `restartPolicy` _[RestartPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#restartpolicy-v1-core)_ | Restart policy for all containers within the pod.
One of `Always`, `OnFailure`, `Never`.
Default to `Always`.
More info: `https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy`
RestartPolicy field is from `corev1.PodSpec.RestartPolicy`. | | | +| `terminationGracePeriodSeconds` _integer_ | Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
Value must be non-negative integer. The value zero indicates stop immediately via
the kill signal (no opportunity to shut down).
If this value is nil, the default grace period will be used instead.
The grace period is the duration in seconds after the processes running in the pod are sent
a termination signal and the time when the processes are forcibly halted with a kill signal.
Set this value longer than the expected cleanup time for your process.
Defaults to 30 seconds.
TerminationGracePeriodSeconds field is from `corev1.PodSpec.TerminationGracePeriodSeconds`. | | | +| `activeDeadlineSeconds` _integer_ | Optional duration in seconds the pod may be active on the node relative to
StartTime before the system will actively try to mark it failed and kill associated containers.
Value must be a positive integer.
ActiveDeadlineSeconds field is from `corev1.PodSpec.ActiveDeadlineSeconds`. | | | +| `dnsPolicy` _[DNSPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#dnspolicy-v1-core)_ | Set DNS policy for the pod.
Defaults to `ClusterFirst`.
Valid values are `ClusterFirstWithHostNet`, `ClusterFirst`, `Default` or `None`.
DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
To have DNS options set along with hostNetwork, you have to specify DNS policy
explicitly to `ClusterFirstWithHostNet`.
DNSPolicy field is from `corev1.PodSpec.DNSPolicy`. | | | +| `serviceAccountName` _string_ | ServiceAccountName is the name of the ServiceAccount to use to run this pod.
More info: `https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/`
ServiceAccountName field is from `corev1.PodSpec.ServiceAccountName`. | | | +| `hostNetwork` _boolean_ | Host networking requested for this pod. Use the host's network namespace.
If this option is set, the ports that will be used must be specified.
Default to `false`.
HostNetwork field is from `corev1.PodSpec.HostNetwork`. | | | +| `imagePullSecrets` _[LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#localobjectreference-v1-core) array_ | ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
If specified, these secrets will be passed to individual puller implementations for them to use.
More info: `https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod`
ImagePullSecrets field is from `corev1.PodSpec.ImagePullSecrets`. | | | +| `affinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#affinity-v1-core)_ | If specified, the pod's scheduling constraints
Affinity field is from `corev1.PodSpec.Affinity`. | | | | `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#toleration-v1-core) array_ | If specified, the pod's tolerations. | | | -| `schedulerName` _string_ | If specified, the pod will be dispatched by specified scheduler.
If not specified, the pod will be dispatched by default scheduler.
SchedulerName field is from 'corev1.PodSpec.SchedulerName'. | | | -| `additionalContainers` _[Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#container-v1-core) array_ | For most time, there is one main container in a pod(frontend/meta/datanode).
If specified, additional containers will be added to the pod as sidecar containers. | | | +| `schedulerName` _string_ | If specified, the pod will be dispatched by specified scheduler.
If not specified, the pod will be dispatched by default scheduler.
SchedulerName field is from `corev1.PodSpec.SchedulerName`. | | | +| `additionalContainers` _[Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#container-v1-core) array_ | For most time, there is one main container in a pod(`frontend`/`meta`/`datanode`/`flownode`).
If specified, additional containers will be added to the pod as sidecar containers. | | | | `volumes` _[Volume](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#volume-v1-core) array_ | List of volumes that can be mounted by containers belonging to the pod. | | | @@ -561,53 +617,52 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `enabled` _boolean_ | Enable a Prometheus PodMonitor | | | -| `labels` _object (keys:string, values:string)_ | Prometheus PodMonitor labels. | | | -| `interval` _string_ | Interval at which metrics should be scraped | | | +| `enabled` _boolean_ | Enabled indicates whether the PodMonitor is enabled. | | | +| `labels` _object (keys:string, values:string)_ | Labels is the labels for the PodMonitor. | | | +| `interval` _string_ | Interval is the scape interval for the PodMonitor. | | | -#### RemoteWalProvider +#### RaftEngineWAL -RemoteWalProvider defines the remote wal provider for the cluster. +RaftEngineWAL is the specification for local WAL that uses raft-engine. _Appears in:_ -- [GreptimeDBClusterSpec](#greptimedbclusterspec) -- [GreptimeDBStandaloneSpec](#greptimedbstandalonespec) +- [WALProviderSpec](#walproviderspec) | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `kafka` _[KafkaRemoteWal](#kafkaremotewal)_ | | | | +| `fs` _[FileStorage](#filestorage)_ | FileStorage is the file storage configuration for the raft-engine WAL.
If the file storage is not specified, WAL will use DatanodeStorageSpec. | | | -#### S3StorageProvider - +#### S3Storage +S3Storage defines the S3 storage specification. _Appears in:_ -- [ObjectStorageProvider](#objectstorageprovider) +- [ObjectStorageProviderSpec](#objectstorageproviderspec) | Field | Description | Default | Validation | | --- | --- | --- | --- | | `bucket` _string_ | The data will be stored in the bucket. | | | | `region` _string_ | The region of the bucket. | | | -| `endpoint` _string_ | The endpoint of the bucket. | | | -| `secretName` _string_ | The secret of storing the credentials of access key id and secret access key.
The secret must be the same namespace with the GreptimeDBCluster resource. | | | +| `secretName` _string_ | The secret of storing the credentials of access key id and secret access key.
The secret should contain keys named `access-key-id` and `secret-access-key`.
The secret must be the same namespace with the GreptimeDBCluster resource. | | | | `root` _string_ | The S3 directory path. | | | +| `endpoint` _string_ | The endpoint of the bucket. | | | #### ServiceSpec - +ServiceSpec defines the service configuration for the component. @@ -617,10 +672,10 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `type` _[ServiceType](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#servicetype-v1-core)_ | type determines how the Service is exposed. | | | -| `annotations` _object (keys:string, values:string)_ | Additional annotations for the service | | | -| `labels` _object (keys:string, values:string)_ | Additional labels for the service | | | -| `loadBalancerClass` _string_ | loadBalancerClass is the class of the load balancer implementation this Service belongs to. | | | +| `type` _[ServiceType](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#servicetype-v1-core)_ | Type is the type of the service. | | | +| `annotations` _object (keys:string, values:string)_ | Annotations is the annotations for the service. | | | +| `labels` _object (keys:string, values:string)_ | Labels is the labels for the service. | | | +| `loadBalancerClass` _string_ | LoadBalancerClass is the class of the load balancer. | | | #### SlimPodSpec @@ -628,7 +683,7 @@ _Appears in:_ SlimPodSpec is a slimmed down version of corev1.PodSpec. -Most of the fields in SlimPodSpec are copied from corev1.PodSpec. +Most of the fields in SlimPodSpec are copied from `corev1.PodSpec`. @@ -637,19 +692,19 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `nodeSelector` _object (keys:string, values:string)_ | NodeSelector is a selector which must be true for the pod to fit on a node.
Selector which must match a node's labels for the pod to be scheduled on that node.
More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
NodeSelector field is from 'corev1.PodSpec.NodeSelector'. | | | -| `initContainers` _[Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#container-v1-core) array_ | List of initialization containers belonging to the pod.
Init containers are executed in order prior to containers being started. If any
init container fails, the pod is considered to have failed and is handled according
to its restartPolicy. The name for an init container or normal container must be
unique among all containers.
Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
The resourceRequirements of an init container are taken into account during scheduling
by finding the highest request/limit for each resource type, and then using the max of
that value or the sum of the normal containers. Limits are applied to init containers
in a similar fashion.
Init containers cannot currently be added or removed.
Cannot be updated.
More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
InitContainers field is from 'corev1.PodSpec.InitContainers'. | | | -| `restartPolicy` _[RestartPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#restartpolicy-v1-core)_ | Restart policy for all containers within the pod.
One of Always, OnFailure, Never.
Default to Always.
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
RestartPolicy field is from 'corev1.PodSpec.RestartPolicy'. | | | -| `terminationGracePeriodSeconds` _integer_ | Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
Value must be non-negative integer. The value zero indicates stop immediately via
the kill signal (no opportunity to shut down).
If this value is nil, the default grace period will be used instead.
The grace period is the duration in seconds after the processes running in the pod are sent
a termination signal and the time when the processes are forcibly halted with a kill signal.
Set this value longer than the expected cleanup time for your process.
Defaults to 30 seconds.
TerminationGracePeriodSeconds field is from 'corev1.PodSpec.TerminationGracePeriodSeconds'. | | | -| `activeDeadlineSeconds` _integer_ | Optional duration in seconds the pod may be active on the node relative to
StartTime before the system will actively try to mark it failed and kill associated containers.
Value must be a positive integer.
ActiveDeadlineSeconds field is from 'corev1.PodSpec.ActiveDeadlineSeconds'. | | | -| `dnsPolicy` _[DNSPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#dnspolicy-v1-core)_ | Set DNS policy for the pod.
Defaults to "ClusterFirst".
Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
To have DNS options set along with hostNetwork, you have to specify DNS policy
explicitly to 'ClusterFirstWithHostNet'.
DNSPolicy field is from 'corev1.PodSpec.DNSPolicy'. | | | -| `serviceAccountName` _string_ | ServiceAccountName is the name of the ServiceAccount to use to run this pod.
More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
ServiceAccountName field is from 'corev1.PodSpec.ServiceAccountName'. | | | -| `hostNetwork` _boolean_ | Host networking requested for this pod. Use the host's network namespace.
If this option is set, the ports that will be used must be specified.
Default to false.
HostNetwork field is from 'corev1.PodSpec.HostNetwork'. | | | -| `imagePullSecrets` _[LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#localobjectreference-v1-core) array_ | ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
If specified, these secrets will be passed to individual puller implementations for them to use.
More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
ImagePullSecrets field is from 'corev1.PodSpec.ImagePullSecrets'. | | | -| `affinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#affinity-v1-core)_ | If specified, the pod's scheduling constraints
Affinity field is from 'corev1.PodSpec.Affinity'. | | | +| `nodeSelector` _object (keys:string, values:string)_ | NodeSelector is a selector which must be true for the pod to fit on a node.
Selector which must match a node's labels for the pod to be scheduled on that node.
More info: `https://kubernetes.io/docs/concepts/configuration/assign-pod-node/`
NodeSelector field is from `corev1.PodSpec.NodeSelector`. | | | +| `initContainers` _[Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#container-v1-core) array_ | List of initialization containers belonging to the pod.
Init containers are executed in order prior to containers being started. If any
init container fails, the pod is considered to have failed and is handled according
to its restartPolicy. The name for an init container or normal container must be
unique among all containers.
Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
The resourceRequirements of an init container are taken into account during scheduling
by finding the highest request/limit for each resource type, and then using the max of
that value or the sum of the normal containers. Limits are applied to init containers
in a similar fashion.
Init containers cannot currently be added or removed.
Cannot be updated.
More info: `https://kubernetes.io/docs/concepts/workloads/pods/init-containers/`
InitContainers field is from `corev1.PodSpec.InitContainers`. | | | +| `restartPolicy` _[RestartPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#restartpolicy-v1-core)_ | Restart policy for all containers within the pod.
One of `Always`, `OnFailure`, `Never`.
Default to `Always`.
More info: `https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy`
RestartPolicy field is from `corev1.PodSpec.RestartPolicy`. | | | +| `terminationGracePeriodSeconds` _integer_ | Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
Value must be non-negative integer. The value zero indicates stop immediately via
the kill signal (no opportunity to shut down).
If this value is nil, the default grace period will be used instead.
The grace period is the duration in seconds after the processes running in the pod are sent
a termination signal and the time when the processes are forcibly halted with a kill signal.
Set this value longer than the expected cleanup time for your process.
Defaults to 30 seconds.
TerminationGracePeriodSeconds field is from `corev1.PodSpec.TerminationGracePeriodSeconds`. | | | +| `activeDeadlineSeconds` _integer_ | Optional duration in seconds the pod may be active on the node relative to
StartTime before the system will actively try to mark it failed and kill associated containers.
Value must be a positive integer.
ActiveDeadlineSeconds field is from `corev1.PodSpec.ActiveDeadlineSeconds`. | | | +| `dnsPolicy` _[DNSPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#dnspolicy-v1-core)_ | Set DNS policy for the pod.
Defaults to `ClusterFirst`.
Valid values are `ClusterFirstWithHostNet`, `ClusterFirst`, `Default` or `None`.
DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
To have DNS options set along with hostNetwork, you have to specify DNS policy
explicitly to `ClusterFirstWithHostNet`.
DNSPolicy field is from `corev1.PodSpec.DNSPolicy`. | | | +| `serviceAccountName` _string_ | ServiceAccountName is the name of the ServiceAccount to use to run this pod.
More info: `https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/`
ServiceAccountName field is from `corev1.PodSpec.ServiceAccountName`. | | | +| `hostNetwork` _boolean_ | Host networking requested for this pod. Use the host's network namespace.
If this option is set, the ports that will be used must be specified.
Default to `false`.
HostNetwork field is from `corev1.PodSpec.HostNetwork`. | | | +| `imagePullSecrets` _[LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#localobjectreference-v1-core) array_ | ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
If specified, these secrets will be passed to individual puller implementations for them to use.
More info: `https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod`
ImagePullSecrets field is from `corev1.PodSpec.ImagePullSecrets`. | | | +| `affinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#affinity-v1-core)_ | If specified, the pod's scheduling constraints
Affinity field is from `corev1.PodSpec.Affinity`. | | | | `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#toleration-v1-core) array_ | If specified, the pod's tolerations. | | | -| `schedulerName` _string_ | If specified, the pod will be dispatched by specified scheduler.
If not specified, the pod will be dispatched by default scheduler.
SchedulerName field is from 'corev1.PodSpec.SchedulerName'. | | | -| `additionalContainers` _[Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#container-v1-core) array_ | For most time, there is one main container in a pod(frontend/meta/datanode).
If specified, additional containers will be added to the pod as sidecar containers. | | | +| `schedulerName` _string_ | If specified, the pod will be dispatched by specified scheduler.
If not specified, the pod will be dispatched by default scheduler.
SchedulerName field is from `corev1.PodSpec.SchedulerName`. | | | +| `additionalContainers` _[Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#container-v1-core) array_ | Most of the time, there is one main container in a pod (`frontend`/`meta`/`datanode`/`flownode`).
If specified, additional containers will be added to the pod as sidecar containers. | | | | `volumes` _[Volume](https://kubernetes.io/docs/reference/generated/kubernetes-api/v/#volume-v1-core) array_ | List of volumes that can be mounted by containers belonging to the pod. | | | @@ -657,12 +712,12 @@ _Appears in:_ _Underlying type:_ _string_ - +StorageRetainPolicyType is the type of the storage retain policy. _Appears in:_ -- [StorageSpec](#storagespec) +- [FileStorage](#filestorage) | Field | Description | | --- | --- | @@ -670,43 +725,38 @@ _Appears in:_ | `Delete` | StorageRetainPolicyTypeDelete specify that the storage will be deleted when the associated StatefulSet delete.
| -#### StorageSpec +#### TLSSpec -StorageSpec will generate PVC. +TLSSpec defines the TLS configurations for the component. _Appears in:_ -- [DatanodeSpec](#datanodespec) +- [FrontendSpec](#frontendspec) - [GreptimeDBStandaloneSpec](#greptimedbstandalonespec) | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `name` _string_ | The name of the storage. | | | -| `storageClassName` _string_ | The name of the storage class to use for the volume. | | | -| `storageSize` _string_ | The size of the storage. | | Pattern: `(^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$)`
| -| `mountPath` _string_ | The mount path of the storage in datanode container. | | | -| `storageRetainPolicy` _[StorageRetainPolicyType](#storageretainpolicytype)_ | The PVCs will retain or delete when the cluster is deleted, default to Retain. | | Enum: [Retain Delete]
| -| `walDir` _string_ | The wal directory of the storage. | | | -| `dataHome` _string_ | The datahome directory. | | | +| `secretName` _string_ | SecretName is the name of the secret that contains the TLS certificates.
The secret must be in the same namespace as the greptime resource.
The secret must contain keys named `tls.crt` and `tls.key`. | | | -#### TLSSpec - +#### WALProviderSpec +WALProviderSpec defines the WAL provider for the cluster. _Appears in:_ -- [FrontendSpec](#frontendspec) +- [GreptimeDBClusterSpec](#greptimedbclusterspec) - [GreptimeDBStandaloneSpec](#greptimedbstandalonespec) | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `secretName` _string_ | The secret name of the TLS certificate, and it must be in the same namespace of the cluster.
The secret must contain keys named ca.crt, tls.crt and tls.key. | | | +| `raftEngine` _[RaftEngineWAL](#raftenginewal)_ | RaftEngineWAL is the specification for local WAL that uses raft-engine. | | | +| `kafka` _[KafkaWAL](#kafkawal)_ | KafkaWAL is the specification for remote WAL that uses Kafka. | | | diff --git a/examples/README.md b/examples/README.md index c5841113..ea887ee7 100644 --- a/examples/README.md +++ b/examples/README.md @@ -14,6 +14,7 @@ The following examples suppose that you have installed the etcd cluster in the ` - [Kafka Remote WAL](./cluster/kafka-remote-wal/cluster.yaml): Create a GreptimeDB cluster with Kafka remote WAL. Please ensure you have installed the Kafka cluster in the `kafka` namespace with the service endpoint `kafka-bootstrap.kafka.svc.cluster.local:9092`. - [Add Custom Config](./cluster/add-custom-config/cluster.yaml): Create a GreptimeDB cluster with custom configuration by using the `config` field. - [AWS NLB](./cluster/aws-nlb/cluster.yaml): Create a GreptimeDB cluster with the AWS NLB service. Please ensure you have already configured it. +- [Standalone WAL](./cluster/standalone-wal/cluster.yaml): Create a GreptimeDB cluster with standalone storage for WAL. 
## Standalone diff --git a/examples/cluster/kafka-remote-wal/cluster.yaml b/examples/cluster/kafka-remote-wal/cluster.yaml index 4f17e8ca..331a96be 100644 --- a/examples/cluster/kafka-remote-wal/cluster.yaml +++ b/examples/cluster/kafka-remote-wal/cluster.yaml @@ -14,7 +14,7 @@ spec: - "etcd.etcd-cluster.svc.cluster.local:2379" datanode: replicas: 1 - remoteWal: + wal: kafka: brokerEndpoints: - "kafka-bootstrap.kafka.svc.cluster.local:9092" diff --git a/examples/cluster/standalone-wal/cluster.yaml b/examples/cluster/standalone-wal/cluster.yaml new file mode 100644 index 00000000..9934f1e4 --- /dev/null +++ b/examples/cluster/standalone-wal/cluster.yaml @@ -0,0 +1,23 @@ +apiVersion: greptime.io/v1alpha1 +kind: GreptimeDBCluster +metadata: + name: cluster-with-standalone-wal +spec: + base: + main: + image: greptime/greptimedb:latest + frontend: + replicas: 1 + meta: + replicas: 1 + etcdEndpoints: + - "etcd.etcd-cluster:2379" + datanode: + replicas: 1 + wal: + raftEngine: + fs: + storageClassName: io2 # Use io2 storage class for WAL for better performance. 
+ name: wal + storageSize: 5Gi + mountPath: /wal diff --git a/manifests/bundle.yaml b/manifests/bundle.yaml index 8b3d3fd7..bdd25189 100644 --- a/manifests/bundle.yaml +++ b/manifests/bundle.yaml @@ -2836,22 +2836,23 @@ spec: properties: dataHome: type: string - mountPath: - type: string - name: - type: string - storageClassName: - type: string - storageRetainPolicy: - enum: - - Retain - - Delete - type: string - storageSize: - pattern: (^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$) - type: string - walDir: - type: string + fs: + properties: + mountPath: + type: string + name: + type: string + storageClassName: + type: string + storageRetainPolicy: + enum: + - Retain + - Delete + type: string + storageSize: + pattern: (^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$) + type: string + type: object type: object template: properties: @@ -5616,8 +5617,6 @@ spec: type: array type: object type: object - enableInfluxDBProtocol: - type: boolean flownode: properties: config: @@ -11181,6 +11180,8 @@ spec: properties: secretName: type: string + required: + - secretName type: object type: object httpPort: @@ -13977,16 +13978,36 @@ spec: type: object type: array type: object + required: + - etcdEndpoints type: object mysqlPort: format: int32 type: integer objectStorage: properties: - cacheCapacity: - type: string - cachePath: - type: string + cache: + properties: + cacheCapacity: + type: string + fs: + properties: + mountPath: + type: string + name: + type: string + storageClassName: + type: string + storageRetainPolicy: + enum: + - Retain + - Delete + type: string + storageSize: + pattern: (^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$) + type: string + type: object + type: object gcs: properties: bucket: @@ -13999,6 +14020,9 @@ spec: type: string secretName: type: string + required: + - bucket + - root type: object oss: properties: @@ -14012,6 +14036,10 @@ spec: type: string secretName: type: string + required: + - bucket + - region + - root type: object s3: properties: @@ 
-14025,6 +14053,10 @@ spec: type: string secretName: type: string + required: + - bucket + - region + - root type: object type: object postgreSQLPort: @@ -14040,8 +14072,15 @@ spec: additionalProperties: type: string type: object + required: + - enabled type: object - remoteWal: + rpcPort: + format: int32 + type: integer + version: + type: string + wal: properties: kafka: properties: @@ -14049,13 +14088,34 @@ spec: items: type: string type: array + required: + - brokerEndpoints + type: object + raftEngine: + properties: + fs: + properties: + mountPath: + type: string + name: + type: string + storageClassName: + type: string + storageRetainPolicy: + enum: + - Retain + - Delete + type: string + storageSize: + pattern: (^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$) + type: string + type: object type: object type: object - rpcPort: - format: int32 - type: integer - version: - type: string + required: + - datanode + - frontend + - meta type: object status: properties: @@ -16950,8 +17010,28 @@ spec: type: object config: type: string - enableInfluxDBProtocol: - type: boolean + datanodeStorage: + properties: + dataHome: + type: string + fs: + properties: + mountPath: + type: string + name: + type: string + storageClassName: + type: string + storageRetainPolicy: + enum: + - Retain + - Delete + type: string + storageSize: + pattern: (^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$) + type: string + type: object + type: object httpPort: format: int32 type: integer @@ -16960,36 +17040,33 @@ spec: image: type: string type: object - localStorage: - properties: - dataHome: - type: string - mountPath: - type: string - name: - type: string - storageClassName: - type: string - storageRetainPolicy: - enum: - - Retain - - Delete - type: string - storageSize: - pattern: (^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$) - type: string - walDir: - type: string - type: object mysqlPort: format: int32 type: integer objectStorage: properties: - cacheCapacity: - type: string - cachePath: - type: 
string + cache: + properties: + cacheCapacity: + type: string + fs: + properties: + mountPath: + type: string + name: + type: string + storageClassName: + type: string + storageRetainPolicy: + enum: + - Retain + - Delete + type: string + storageSize: + pattern: (^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$) + type: string + type: object + type: object gcs: properties: bucket: @@ -17002,6 +17079,9 @@ spec: type: string secretName: type: string + required: + - bucket + - root type: object oss: properties: @@ -17015,6 +17095,10 @@ spec: type: string secretName: type: string + required: + - bucket + - region + - root type: object s3: properties: @@ -17028,6 +17112,10 @@ spec: type: string secretName: type: string + required: + - bucket + - region + - root type: object type: object postgreSQLPort: @@ -17043,16 +17131,8 @@ spec: additionalProperties: type: string type: object - type: object - remoteWal: - properties: - kafka: - properties: - brokerEndpoints: - items: - type: string - type: array - type: object + required: + - enabled type: object rpcPort: format: int32 @@ -17076,9 +17156,43 @@ spec: properties: secretName: type: string + required: + - secretName type: object version: type: string + wal: + properties: + kafka: + properties: + brokerEndpoints: + items: + type: string + type: array + required: + - brokerEndpoints + type: object + raftEngine: + properties: + fs: + properties: + mountPath: + type: string + name: + type: string + storageClassName: + type: string + storageRetainPolicy: + enum: + - Retain + - Delete + type: string + storageSize: + pattern: (^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$) + type: string + type: object + type: object + type: object type: object status: properties: diff --git a/manifests/crds.yaml b/manifests/crds.yaml index 2bca8490..e38c8c0c 100644 --- a/manifests/crds.yaml +++ b/manifests/crds.yaml @@ -2829,22 +2829,23 @@ spec: properties: dataHome: type: string - mountPath: - type: string - name: - type: string - 
storageClassName: - type: string - storageRetainPolicy: - enum: - - Retain - - Delete - type: string - storageSize: - pattern: (^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$) - type: string - walDir: - type: string + fs: + properties: + mountPath: + type: string + name: + type: string + storageClassName: + type: string + storageRetainPolicy: + enum: + - Retain + - Delete + type: string + storageSize: + pattern: (^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$) + type: string + type: object type: object template: properties: @@ -5609,8 +5610,6 @@ spec: type: array type: object type: object - enableInfluxDBProtocol: - type: boolean flownode: properties: config: @@ -11174,6 +11173,8 @@ spec: properties: secretName: type: string + required: + - secretName type: object type: object httpPort: @@ -13970,16 +13971,36 @@ spec: type: object type: array type: object + required: + - etcdEndpoints type: object mysqlPort: format: int32 type: integer objectStorage: properties: - cacheCapacity: - type: string - cachePath: - type: string + cache: + properties: + cacheCapacity: + type: string + fs: + properties: + mountPath: + type: string + name: + type: string + storageClassName: + type: string + storageRetainPolicy: + enum: + - Retain + - Delete + type: string + storageSize: + pattern: (^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$) + type: string + type: object + type: object gcs: properties: bucket: @@ -13992,6 +14013,9 @@ spec: type: string secretName: type: string + required: + - bucket + - root type: object oss: properties: @@ -14005,6 +14029,10 @@ spec: type: string secretName: type: string + required: + - bucket + - region + - root type: object s3: properties: @@ -14018,6 +14046,10 @@ spec: type: string secretName: type: string + required: + - bucket + - region + - root type: object type: object postgreSQLPort: @@ -14033,8 +14065,15 @@ spec: additionalProperties: type: string type: object + required: + - enabled type: object - remoteWal: + rpcPort: + format: int32 + type: 
integer + version: + type: string + wal: properties: kafka: properties: @@ -14042,13 +14081,34 @@ spec: items: type: string type: array + required: + - brokerEndpoints + type: object + raftEngine: + properties: + fs: + properties: + mountPath: + type: string + name: + type: string + storageClassName: + type: string + storageRetainPolicy: + enum: + - Retain + - Delete + type: string + storageSize: + pattern: (^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$) + type: string + type: object type: object type: object - rpcPort: - format: int32 - type: integer - version: - type: string + required: + - datanode + - frontend + - meta type: object status: properties: @@ -16943,8 +17003,28 @@ spec: type: object config: type: string - enableInfluxDBProtocol: - type: boolean + datanodeStorage: + properties: + dataHome: + type: string + fs: + properties: + mountPath: + type: string + name: + type: string + storageClassName: + type: string + storageRetainPolicy: + enum: + - Retain + - Delete + type: string + storageSize: + pattern: (^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$) + type: string + type: object + type: object httpPort: format: int32 type: integer @@ -16953,36 +17033,33 @@ spec: image: type: string type: object - localStorage: - properties: - dataHome: - type: string - mountPath: - type: string - name: - type: string - storageClassName: - type: string - storageRetainPolicy: - enum: - - Retain - - Delete - type: string - storageSize: - pattern: (^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$) - type: string - walDir: - type: string - type: object mysqlPort: format: int32 type: integer objectStorage: properties: - cacheCapacity: - type: string - cachePath: - type: string + cache: + properties: + cacheCapacity: + type: string + fs: + properties: + mountPath: + type: string + name: + type: string + storageClassName: + type: string + storageRetainPolicy: + enum: + - Retain + - Delete + type: string + storageSize: + pattern: (^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$) + type: 
string + type: object + type: object gcs: properties: bucket: @@ -16995,6 +17072,9 @@ spec: type: string secretName: type: string + required: + - bucket + - root type: object oss: properties: @@ -17008,6 +17088,10 @@ spec: type: string secretName: type: string + required: + - bucket + - region + - root type: object s3: properties: @@ -17021,6 +17105,10 @@ spec: type: string secretName: type: string + required: + - bucket + - region + - root type: object type: object postgreSQLPort: @@ -17036,16 +17124,8 @@ spec: additionalProperties: type: string type: object - type: object - remoteWal: - properties: - kafka: - properties: - brokerEndpoints: - items: - type: string - type: array - type: object + required: + - enabled type: object rpcPort: format: int32 @@ -17069,9 +17149,43 @@ spec: properties: secretName: type: string + required: + - secretName type: object version: type: string + wal: + properties: + kafka: + properties: + brokerEndpoints: + items: + type: string + type: array + required: + - brokerEndpoints + type: object + raftEngine: + properties: + fs: + properties: + mountPath: + type: string + name: + type: string + storageClassName: + type: string + storageRetainPolicy: + enum: + - Retain + - Delete + type: string + storageSize: + pattern: (^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$) + type: string + type: object + type: object + type: object type: object status: properties: diff --git a/pkg/dbconfig/datanode_config.go b/pkg/dbconfig/datanode_config.go index 864fe6cd..d9ef7b92 100644 --- a/pkg/dbconfig/datanode_config.go +++ b/pkg/dbconfig/datanode_config.go @@ -108,23 +108,24 @@ func (c *DatanodeConfig) ConfigureByCluster(cluster *v1alpha1.GreptimeDBCluster) } } - if cluster.Spec.Datanode != nil { - c.WalDir = util.StringPtr(cluster.Spec.Datanode.Storage.WalDir) - c.StorageDataHome = util.StringPtr(cluster.Spec.Datanode.Storage.DataHome) + // Set the wal dir if the kafka wal is not enabled. 
+ if cluster.GetWALProvider().GetKafkaWAL() == nil && cluster.GetWALDir() != "" { + c.WalDir = util.StringPtr(cluster.GetWALDir()) + } - if len(cluster.Spec.Datanode.Config) > 0 { - if err := c.SetInputConfig(cluster.Spec.Datanode.Config); err != nil { - return err - } + if cluster.GetDatanode().GetDataHome() != "" { + c.StorageDataHome = util.StringPtr(cluster.GetDatanode().GetDataHome()) + } + + if cluster.GetDatanode().GetConfig() != "" { + if err := c.SetInputConfig(cluster.GetDatanode().GetConfig()); err != nil { + return err } } - if cluster.Spec.RemoteWalProvider != nil && cluster.Spec.RemoteWalProvider.KafkaRemoteWal != nil { + if cluster.GetWALProvider().GetKafkaWAL() != nil { c.WalProvider = util.StringPtr("kafka") - c.WalBrokerEndpoints = cluster.Spec.RemoteWalProvider.KafkaRemoteWal.BrokerEndpoints - - // FIXME(zyy17): Unset the wal dir if the wal provider is kafka. It's a temporary solution. - c.WalDir = nil + c.WalBrokerEndpoints = cluster.GetWALProvider().GetKafkaWAL().GetBrokerEndpoints() } return nil diff --git a/pkg/dbconfig/dbconfig.go b/pkg/dbconfig/dbconfig.go index 0b1a8804..fb9d8bfa 100644 --- a/pkg/dbconfig/dbconfig.go +++ b/pkg/dbconfig/dbconfig.go @@ -108,12 +108,6 @@ func FromStandalone(standalone *v1alpha1.GreptimeDBStandalone) ([]byte, error) { return Marshal(cfg) } -const ( - AccessKeyIDSecretKey = "access-key-id" - SecretAccessKeySecretKey = "secret-access-key" - ServiceAccountKey = "service-account-key" -) - func getServiceAccountKey(namespace, name string) (secretAccessKey []byte, err error) { var secret corev1.Secret if err = k8sutil.GetK8sResource(namespace, name, &secret); err != nil { @@ -125,9 +119,9 @@ func getServiceAccountKey(namespace, name string) (secretAccessKey []byte, err e return } - secretAccessKey = secret.Data[ServiceAccountKey] + secretAccessKey = secret.Data[v1alpha1.ServiceAccountKey] if secretAccessKey == nil { - err = fmt.Errorf("secret '%s/%s' does not have service account key '%s'", namespace, name, 
ServiceAccountKey) + err = fmt.Errorf("secret '%s/%s' does not have service account key '%s'", namespace, name, v1alpha1.ServiceAccountKey) return } return @@ -145,15 +139,15 @@ func getOCSCredentials(namespace, name string) (accessKeyID, secretAccessKey []b return } - accessKeyID = ocsCredentials.Data[AccessKeyIDSecretKey] + accessKeyID = ocsCredentials.Data[v1alpha1.AccessKeyIDSecretKey] if accessKeyID == nil { - err = fmt.Errorf("secret '%s/%s' does not have access key id '%s'", namespace, name, AccessKeyIDSecretKey) + err = fmt.Errorf("secret '%s/%s' does not have access key id '%s'", namespace, name, v1alpha1.AccessKeyIDSecretKey) return } - secretAccessKey = ocsCredentials.Data[SecretAccessKeySecretKey] + secretAccessKey = ocsCredentials.Data[v1alpha1.SecretAccessKeySecretKey] if secretAccessKey == nil { - err = fmt.Errorf("secret '%s/%s' does not have secret access key '%s'", namespace, name, SecretAccessKeySecretKey) + err = fmt.Errorf("secret '%s/%s' does not have secret access key '%s'", namespace, name, v1alpha1.SecretAccessKeySecretKey) return } diff --git a/pkg/dbconfig/dbconfig_test.go b/pkg/dbconfig/dbconfig_test.go index 3480ce17..1ece5dc0 100644 --- a/pkg/dbconfig/dbconfig_test.go +++ b/pkg/dbconfig/dbconfig_test.go @@ -30,14 +30,16 @@ func TestFromClusterForDatanodeConfig(t *testing.T) { Namespace: "default", }, Spec: v1alpha1.GreptimeDBClusterSpec{ - ObjectStorageProvider: &v1alpha1.ObjectStorageProvider{ - S3: &v1alpha1.S3StorageProvider{ - Root: "testcluster", - Bucket: "testbucket", + ObjectStorageProvider: &v1alpha1.ObjectStorageProviderSpec{ + S3: &v1alpha1.S3Storage{ + Root: "testcluster", + Bucket: "testbucket", + Endpoint: "s3.amazonaws.com", + Region: "us-west-2", }, }, - RemoteWalProvider: &v1alpha1.RemoteWalProvider{ - KafkaRemoteWal: &v1alpha1.KafkaRemoteWal{ + WALProvider: &v1alpha1.WALProviderSpec{ + KafkaWAL: &v1alpha1.KafkaWAL{ BrokerEndpoints: []string{ "broker1:9092", "broker2:9092", @@ -50,6 +52,8 @@ func 
TestFromClusterForDatanodeConfig(t *testing.T) { testConfig := ` [storage] bucket = "testbucket" + endpoint = "s3.amazonaws.com" + region = "us-west-2" root = "testcluster" type = "S3" @@ -79,10 +83,12 @@ level = 'error' Namespace: "default", }, Spec: v1alpha1.GreptimeDBClusterSpec{ - ObjectStorageProvider: &v1alpha1.ObjectStorageProvider{ - S3: &v1alpha1.S3StorageProvider{ - Root: "testcluster", - Bucket: "testbucket", + ObjectStorageProvider: &v1alpha1.ObjectStorageProviderSpec{ + S3: &v1alpha1.S3Storage{ + Root: "testcluster", + Bucket: "testbucket", + Endpoint: "s3.amazonaws.com", + Region: "us-west-2", }, }, Datanode: &v1alpha1.DatanodeSpec{ @@ -100,6 +106,8 @@ level = 'error' [storage] bucket = "testbucket" + endpoint = "s3.amazonaws.com" + region = "us-west-2" root = "testcluster" type = "S3" ` diff --git a/pkg/dbconfig/flownode_config.go b/pkg/dbconfig/flownode_config.go index 561e0df0..2f74e695 100644 --- a/pkg/dbconfig/flownode_config.go +++ b/pkg/dbconfig/flownode_config.go @@ -32,11 +32,12 @@ type FlownodeConfig struct { // ConfigureByCluster configures the datanode config by the given cluster. func (c *FlownodeConfig) ConfigureByCluster(cluster *v1alpha1.GreptimeDBCluster) error { - if cluster.Spec.Flownode != nil && len(cluster.Spec.Flownode.Config) > 0 { - if err := c.SetInputConfig(cluster.Spec.Flownode.Config); err != nil { + if cfg := cluster.GetFlownode().GetConfig(); cfg != "" { + if err := c.SetInputConfig(cfg); err != nil { return err } } + return nil } diff --git a/pkg/dbconfig/frontend_config.go b/pkg/dbconfig/frontend_config.go index 90b38cec..87170baa 100644 --- a/pkg/dbconfig/frontend_config.go +++ b/pkg/dbconfig/frontend_config.go @@ -28,8 +28,8 @@ type FrontendConfig struct { // ConfigureByCluster configures the frontend configuration by the given cluster. 
func (c *FrontendConfig) ConfigureByCluster(cluster *v1alpha1.GreptimeDBCluster) error { - if cluster.Spec.Frontend != nil && len(cluster.Spec.Frontend.Config) > 0 { - if err := c.SetInputConfig(cluster.Spec.Frontend.Config); err != nil { + if cfg := cluster.GetFrontend().GetConfig(); cfg != "" { + if err := c.SetInputConfig(cfg); err != nil { return err } } diff --git a/pkg/dbconfig/metasrv_config.go b/pkg/dbconfig/metasrv_config.go index 2136ab87..2603fa37 100644 --- a/pkg/dbconfig/metasrv_config.go +++ b/pkg/dbconfig/metasrv_config.go @@ -48,15 +48,15 @@ func (c *MetasrvConfig) ConfigureByCluster(cluster *v1alpha1.GreptimeDBCluster) c.StoreKeyPrefix = &cluster.Spec.Meta.StoreKeyPrefix } - if len(cluster.Spec.Meta.Config) > 0 { - if err := c.SetInputConfig(cluster.Spec.Meta.Config); err != nil { + if cfg := cluster.GetMeta().GetConfig(); cfg != "" { + if err := c.SetInputConfig(cfg); err != nil { return err } } - if cluster.Spec.RemoteWalProvider != nil && cluster.Spec.RemoteWalProvider.KafkaRemoteWal != nil { + if kafka := cluster.GetWALProvider().GetKafkaWAL(); kafka != nil { c.WalProvider = util.StringPtr("kafka") - c.WalBrokerEndpoints = cluster.Spec.RemoteWalProvider.KafkaRemoteWal.BrokerEndpoints + c.WalBrokerEndpoints = kafka.GetBrokerEndpoints() } } diff --git a/pkg/dbconfig/standalone_config.go b/pkg/dbconfig/standalone_config.go index c9240224..482bbc0a 100644 --- a/pkg/dbconfig/standalone_config.go +++ b/pkg/dbconfig/standalone_config.go @@ -102,11 +102,17 @@ func (c *StandaloneConfig) ConfigureByStandalone(standalone *v1alpha1.GreptimeDB } } - c.WalDir = util.StringPtr(standalone.Spec.LocalStorage.WalDir) - c.StorageDataHome = util.StringPtr(standalone.Spec.LocalStorage.DataHome) + // Set the wal dir if the kafka wal is not enabled. 
+ if standalone.GetWALProvider().GetKafkaWAL() == nil && standalone.GetWALDir() != "" { + c.WalDir = util.StringPtr(standalone.GetWALDir()) + } + + if standalone.GetDataHome() != "" { + c.StorageDataHome = util.StringPtr(standalone.GetDataHome()) + } - if len(standalone.Spec.Config) > 0 { - if err := c.SetInputConfig(standalone.Spec.Config); err != nil { + if cfg := standalone.GetConfig(); cfg != "" { + if err := c.SetInputConfig(cfg); err != nil { return err } } diff --git a/tests/e2e/greptimedbcluster/test_basic_cluster.go b/tests/e2e/greptimedbcluster/test_basic_cluster.go index 784021c3..1e2cd1c4 100644 --- a/tests/e2e/greptimedbcluster/test_basic_cluster.go +++ b/tests/e2e/greptimedbcluster/test_basic_cluster.go @@ -83,11 +83,11 @@ func TestBasicCluster(ctx context.Context, h *helper.Helper) { Expect(err).NotTo(HaveOccurred(), "failed to delete cluster") Eventually(func() error { // The cluster will be deleted eventually. - return h.Get(ctx, client.ObjectKey{Name: testCluster.Namespace, Namespace: testCluster.Namespace}, testCluster) + return h.Get(ctx, client.ObjectKey{Name: testCluster.Name, Namespace: testCluster.Namespace}, testCluster) }, helper.DefaultTimeout, time.Second).Should(HaveOccurred()) By("The PVC of the datanode should be retained") - datanodePVCs, err := h.GetPVCs(ctx, testCluster.Namespace, testCluster.Name, greptimev1alpha1.DatanodeComponentKind) + datanodePVCs, err := h.GetPVCs(ctx, testCluster.Namespace, testCluster.Name, greptimev1alpha1.DatanodeComponentKind, nil) Expect(err).NotTo(HaveOccurred(), "failed to get datanode PVCs") Expect(int32(len(datanodePVCs))).To(Equal(*testCluster.Spec.Datanode.Replicas), "the number of datanode PVCs should be equal to the number of datanode replicas") diff --git a/tests/e2e/greptimedbcluster/test_cluster_enable_flow.go b/tests/e2e/greptimedbcluster/test_cluster_enable_flow.go index b3804267..c10be95c 100644 --- a/tests/e2e/greptimedbcluster/test_cluster_enable_flow.go +++ 
b/tests/e2e/greptimedbcluster/test_cluster_enable_flow.go @@ -83,11 +83,11 @@ func TestClusterEnableFlow(ctx context.Context, h *helper.Helper) { Expect(err).NotTo(HaveOccurred(), "failed to delete cluster") Eventually(func() error { // The cluster will be deleted eventually. - return h.Get(ctx, client.ObjectKey{Name: testCluster.Namespace, Namespace: testCluster.Namespace}, testCluster) + return h.Get(ctx, client.ObjectKey{Name: testCluster.Name, Namespace: testCluster.Namespace}, testCluster) }, helper.DefaultTimeout, time.Second).Should(HaveOccurred()) By("The PVC of the datanode should be retained") - datanodePVCs, err := h.GetPVCs(ctx, testCluster.Namespace, testCluster.Name, greptimev1alpha1.DatanodeComponentKind) + datanodePVCs, err := h.GetPVCs(ctx, testCluster.Namespace, testCluster.Name, greptimev1alpha1.DatanodeComponentKind, nil) Expect(err).NotTo(HaveOccurred(), "failed to get datanode PVCs") Expect(int32(len(datanodePVCs))).To(Equal(*testCluster.Spec.Datanode.Replicas), "the number of datanode PVCs should be equal to the number of datanode replicas") diff --git a/tests/e2e/greptimedbcluster/test_cluster_enable_remote_wal.go b/tests/e2e/greptimedbcluster/test_cluster_enable_remote_wal.go index 05617c12..98535f15 100644 --- a/tests/e2e/greptimedbcluster/test_cluster_enable_remote_wal.go +++ b/tests/e2e/greptimedbcluster/test_cluster_enable_remote_wal.go @@ -83,11 +83,11 @@ func TestClusterEnableRemoteWal(ctx context.Context, h *helper.Helper) { Expect(err).NotTo(HaveOccurred(), "failed to delete cluster") Eventually(func() error { // The cluster will be deleted eventually. 
- return h.Get(ctx, client.ObjectKey{Name: testCluster.Namespace, Namespace: testCluster.Namespace}, testCluster) + return h.Get(ctx, client.ObjectKey{Name: testCluster.Name, Namespace: testCluster.Namespace}, testCluster) }, helper.DefaultTimeout, time.Second).Should(HaveOccurred()) By("The PVC of the datanode should be retained") - datanodePVCs, err := h.GetPVCs(ctx, testCluster.Namespace, testCluster.Name, greptimev1alpha1.DatanodeComponentKind) + datanodePVCs, err := h.GetPVCs(ctx, testCluster.Namespace, testCluster.Name, greptimev1alpha1.DatanodeComponentKind, nil) Expect(err).NotTo(HaveOccurred(), "failed to get datanode PVCs") Expect(int32(len(datanodePVCs))).To(Equal(*testCluster.Spec.Datanode.Replicas), "the number of datanode PVCs should be equal to the number of datanode replicas") diff --git a/tests/e2e/greptimedbcluster/test_cluster_standalone_wal.go b/tests/e2e/greptimedbcluster/test_cluster_standalone_wal.go new file mode 100644 index 00000000..296bd7e6 --- /dev/null +++ b/tests/e2e/greptimedbcluster/test_cluster_standalone_wal.go @@ -0,0 +1,111 @@ +// Copyright 2024 Greptime Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package greptimedbcluster + +import ( + "context" + "fmt" + "net" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "sigs.k8s.io/controller-runtime/pkg/client" + + greptimev1alpha1 "github.com/GreptimeTeam/greptimedb-operator/apis/v1alpha1" + "github.com/GreptimeTeam/greptimedb-operator/controllers/common" + "github.com/GreptimeTeam/greptimedb-operator/tests/e2e/helper" +) + +// TestClusterStandaloneWAL tests a basic cluster. +func TestClusterStandaloneWAL(ctx context.Context, h *helper.Helper) { + const ( + testCRFile = "./testdata/resources/cluster/standalone-wal/cluster.yaml" + testSQLFile = "./testdata/sql/cluster/partition.sql" + ) + + By(fmt.Sprintf("greptimecluster test with CR file %s and SQL file %s", testCRFile, testSQLFile)) + + testCluster := new(greptimev1alpha1.GreptimeDBCluster) + err := h.LoadCR(testCRFile, testCluster) + Expect(err).NotTo(HaveOccurred(), "failed to load greptimedbcluster yaml file") + + err = h.Create(ctx, testCluster) + Expect(err).NotTo(HaveOccurred(), "failed to create greptimedbcluster") + + By("Check the status of testCluster") + Eventually(func() error { + clusterPhase, err := h.GetPhase(ctx, testCluster.Namespace, testCluster.Name, new(greptimev1alpha1.GreptimeDBCluster)) + if err != nil { + return err + } + + if clusterPhase != greptimev1alpha1.PhaseRunning { + return fmt.Errorf("cluster is not running") + } + + return nil + }, helper.DefaultTimeout, time.Second).ShouldNot(HaveOccurred()) + + By("Execute distributed SQL test") + frontendAddr, err := h.PortForward(ctx, testCluster.Namespace, common.ResourceName(testCluster.Name, greptimev1alpha1.FrontendComponentKind), int(testCluster.Spec.PostgreSQLPort)) + Expect(err).NotTo(HaveOccurred(), "failed to port forward frontend service") + Eventually(func() error { + conn, err := net.Dial("tcp", frontendAddr) + if err != nil { + return err + } + conn.Close() + return nil + }, helper.DefaultTimeout, time.Second).ShouldNot(HaveOccurred()) + + err = h.RunSQLTest(ctx, frontendAddr, testSQLFile) + Expect(err).NotTo(HaveOccurred(), "failed to run sql test") + + By("Kill 
the port forwarding process") + h.KillPortForwardProcess() + + By("Delete cluster") + err = h.Delete(ctx, testCluster) + Expect(err).NotTo(HaveOccurred(), "failed to delete cluster") + Eventually(func() error { + // The cluster will be deleted eventually. + return h.Get(ctx, client.ObjectKey{Name: testCluster.Name, Namespace: testCluster.Namespace}, testCluster) + }, helper.DefaultTimeout, time.Second).Should(HaveOccurred()) + + By("The PVC of the datanode should be retained") + datanodePVCs, err := h.GetPVCs(ctx, testCluster.Namespace, testCluster.Name, greptimev1alpha1.DatanodeComponentKind, common.DatanodeFileStorageLabels) + Expect(err).NotTo(HaveOccurred(), "failed to get datanode PVCs") + Expect(int32(len(datanodePVCs))).To(Equal(*testCluster.Spec.Datanode.Replicas), "the number of datanode PVCs should be equal to the number of datanode replicas") + + By("The PVC of the WAL should be deleted") + Eventually(func() error { + walPVCs, err := h.GetPVCs(ctx, testCluster.Namespace, testCluster.Name, greptimev1alpha1.DatanodeComponentKind, common.WALFileStorageLabels) + if err != nil { + return err + } + if len(walPVCs) != 0 { + return fmt.Errorf("the number of WAL PVCs should be 0") + } + return nil + }, helper.DefaultTimeout, time.Second).ShouldNot(HaveOccurred()) + + By("Remove the PVC of the datanode") + for _, pvc := range datanodePVCs { + err = h.Delete(ctx, &pvc) + Expect(err).NotTo(HaveOccurred(), "failed to delete datanode PVC") + } +} diff --git a/tests/e2e/greptimedbcluster/test_scale_cluster.go b/tests/e2e/greptimedbcluster/test_scale_cluster.go index 35dc8bf8..c5a70a66 100644 --- a/tests/e2e/greptimedbcluster/test_scale_cluster.go +++ b/tests/e2e/greptimedbcluster/test_scale_cluster.go @@ -157,6 +157,6 @@ func TestScaleCluster(ctx context.Context, h *helper.Helper) { Expect(err).NotTo(HaveOccurred(), "failed to delete cluster") Eventually(func() error { // The cluster will be deleted eventually. 
- return h.Get(ctx, client.ObjectKey{Name: testCluster.Namespace, Namespace: testCluster.Namespace}, testCluster) + return h.Get(ctx, client.ObjectKey{Name: testCluster.Name, Namespace: testCluster.Namespace}, testCluster) }, helper.DefaultTimeout, time.Second).Should(HaveOccurred()) } diff --git a/tests/e2e/greptimedbstandalone/test_basic_standalone.go b/tests/e2e/greptimedbstandalone/test_basic_standalone.go index 096b320c..5e74541e 100644 --- a/tests/e2e/greptimedbstandalone/test_basic_standalone.go +++ b/tests/e2e/greptimedbstandalone/test_basic_standalone.go @@ -83,11 +83,11 @@ func TestBasicStandalone(ctx context.Context, h *helper.Helper) { Expect(err).NotTo(HaveOccurred(), "failed to delete standalone") Eventually(func() error { // The standalone will be deleted eventually. - return h.Get(ctx, client.ObjectKey{Name: testStandalone.Namespace, Namespace: testStandalone.Namespace}, testStandalone) + return h.Get(ctx, client.ObjectKey{Name: testStandalone.Name, Namespace: testStandalone.Namespace}, testStandalone) }, helper.DefaultTimeout, time.Second).Should(HaveOccurred()) By("The PVC of the database should be retained") - dataPVCs, err := h.GetPVCs(ctx, testStandalone.Namespace, testStandalone.Name, greptimev1alpha1.StandaloneKind) + dataPVCs, err := h.GetPVCs(ctx, testStandalone.Namespace, testStandalone.Name, greptimev1alpha1.StandaloneKind, nil) Expect(err).NotTo(HaveOccurred(), "failed to get data PVCs") Expect(len(dataPVCs)).To(Equal(1), "the number of datanode PVCs should be equal to 1") diff --git a/tests/e2e/helper/helper.go b/tests/e2e/helper/helper.go index b4a63089..ceca28c9 100644 --- a/tests/e2e/helper/helper.go +++ b/tests/e2e/helper/helper.go @@ -32,6 +32,7 @@ import ( greptimev1alpha1 "github.com/GreptimeTeam/greptimedb-operator/apis/v1alpha1" "github.com/GreptimeTeam/greptimedb-operator/controllers/common" "github.com/GreptimeTeam/greptimedb-operator/controllers/constant" + "github.com/GreptimeTeam/greptimedb-operator/pkg/util" ) const ( @@ 
-124,24 +125,30 @@ func (h *Helper) GetPhase(ctx context.Context, namespace, name string, object cl } // GetPVCs returns the PVC list of the given component. -func (h *Helper) GetPVCs(ctx context.Context, namespace, name string, kind greptimev1alpha1.ComponentKind) ([]corev1.PersistentVolumeClaim, error) { +func (h *Helper) GetPVCs(ctx context.Context, namespace, name string, kind greptimev1alpha1.ComponentKind, additionalLabels map[string]string) ([]corev1.PersistentVolumeClaim, error) { + matchedLabels := map[string]string{ + constant.GreptimeDBComponentName: common.ResourceName(name, kind), + } + + if additionalLabels != nil { + matchedLabels = util.MergeStringMap(matchedLabels, additionalLabels) + } + selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{ - MatchLabels: map[string]string{ - constant.GreptimeDBComponentName: common.ResourceName(name, kind), - }, + MatchLabels: matchedLabels, }) if err != nil { return nil, err } - pvcList := new(corev1.PersistentVolumeClaimList) + claims := new(corev1.PersistentVolumeClaimList) - if err = h.List(ctx, pvcList, client.InNamespace(namespace), + if err = h.List(ctx, claims, client.InNamespace(namespace), client.MatchingLabelsSelector{Selector: selector}); err != nil { return nil, err } - return pvcList.Items, nil + return claims.Items, nil } // CleanEtcdData cleans up all data in etcd by executing the etcdctl command in the given pod. 
diff --git a/tests/e2e/testdata/resources/cluster/enable-remote-wal/cluster.yaml b/tests/e2e/testdata/resources/cluster/enable-remote-wal/cluster.yaml index 2ec83a56..04d579e8 100644 --- a/tests/e2e/testdata/resources/cluster/enable-remote-wal/cluster.yaml +++ b/tests/e2e/testdata/resources/cluster/enable-remote-wal/cluster.yaml @@ -31,7 +31,7 @@ spec: - etcd.etcd-cluster:2379 datanode: replicas: 3 - remoteWal: + wal: kafka: brokerEndpoints: - kafka-wal-kafka-bootstrap.kafka:9092 diff --git a/tests/e2e/testdata/resources/cluster/scale/cluster.yaml b/tests/e2e/testdata/resources/cluster/scale/cluster.yaml index e011e9ca..a2d535ea 100644 --- a/tests/e2e/testdata/resources/cluster/scale/cluster.yaml +++ b/tests/e2e/testdata/resources/cluster/scale/cluster.yaml @@ -32,7 +32,8 @@ spec: datanode: replicas: 1 storage: - storageRetainPolicy: Delete + fs: + storageRetainPolicy: Delete httpPort: 4000 rpcPort: 4001 mysqlPort: 4002 diff --git a/tests/e2e/testdata/resources/cluster/standalone-wal/cluster.yaml b/tests/e2e/testdata/resources/cluster/standalone-wal/cluster.yaml new file mode 100644 index 00000000..bdbc167b --- /dev/null +++ b/tests/e2e/testdata/resources/cluster/standalone-wal/cluster.yaml @@ -0,0 +1,25 @@ +apiVersion: greptime.io/v1alpha1 +kind: GreptimeDBCluster +metadata: + name: cluster-with-standalone-wal + namespace: default +spec: + base: + main: + image: greptime/greptimedb:latest + frontend: + replicas: 1 + meta: + replicas: 1 + etcdEndpoints: + - "etcd.etcd-cluster:2379" + datanode: + replicas: 1 + wal: + raftEngine: + fs: + name: wal + storageClassName: standard + storageSize: 5Gi + mountPath: /wal + storageRetainPolicy: Delete # The wal will be deleted after cluster is destroyed.