Commit f68921a: merge with 0.24.1
Slach committed Nov 10, 2024 (2 parents: 4a219cb + 4ca83ef)

Showing 11 changed files with 166 additions and 268 deletions.
81 changes: 40 additions & 41 deletions deploy/helm/clickhouse-operator/README.md

Large diffs are not rendered by default.

@@ -13,7 +13,6 @@ metadata:
   namespace: {{ .Release.Namespace }}
   labels: {{ include "altinity-clickhouse-operator.labels" . | nindent 4 }}
   annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }}
-
 # Template Parameters:
 #
 # NAMESPACE=kube-system
97 changes: 19 additions & 78 deletions deploy/helm/clickhouse-operator/values.yaml
@@ -3,92 +3,67 @@ operator:
     # operator.image.repository -- image repository
     repository: altinity/clickhouse-operator
     # operator.image.tag -- image tag (chart's appVersion value will be used if not set)
-
     tag: ""
     # operator.image.pullPolicy -- image pull policy
-
     pullPolicy: IfNotPresent
   containerSecurityContext: {}
-  # operator.resources -- custom resource configuration, look `kubectl explain pod.spec.containers.resources` for details
-
+  # operator.resources -- custom resource configuration, check `kubectl explain pod.spec.containers.resources` for details
   resources: {}
   # limits:
-
   #   cpu: 100m
-
   #   memory: 128Mi
-
   # requests:
-
   #   cpu: 100m
-
   #   memory: 128Mi
-
   # operator.env -- additional environment variables for the clickhouse-operator container in deployment
-
-  # possible format value [{"name": "SAMPLE", "value": "text"}]
+  # possible format value `[{"name": "SAMPLE", "value": "text"}]`
   env: []
 metrics:
   enabled: true
   image:
     # metrics.image.repository -- image repository
     repository: altinity/metrics-exporter
     # metrics.image.tag -- image tag (chart's appVersion value will be used if not set)
-
     tag: ""
     # metrics.image.pullPolicy -- image pull policy
-
     pullPolicy: IfNotPresent
   containerSecurityContext: {}
   # metrics.resources -- custom resource configuration
-
   resources: {}
   # limits:
-
   #   cpu: 100m
-
   #   memory: 128Mi
-
   # requests:
-
   #   cpu: 100m
-
   #   memory: 128Mi
-
   # metrics.env -- additional environment variables for the deployment of metrics-exporter containers
-
-  # possible format value [{"name": "SAMPLE", "value": "text"}]
+  # possible format value `[{"name": "SAMPLE", "value": "text"}]`
   env: []
 # imagePullSecrets -- image pull secret for private images in clickhouse-operator pod
-
-# possible value format [{"name":"your-secret-name"}]
-
-# look `kubectl explain pod.spec.imagePullSecrets` for details
+# possible value format `[{"name":"your-secret-name"}]`,
+# check `kubectl explain pod.spec.imagePullSecrets` for details
 imagePullSecrets: []
 # podLabels -- labels to add to the clickhouse-operator pod
-
 podLabels: {}
-# podAnnotations -- annotations to add to the clickhouse-operator pod, look `kubectl explain pod.spec.annotations` for details
-
+# podAnnotations -- annotations to add to the clickhouse-operator pod, check `kubectl explain pod.spec.annotations` for details
+# @default -- check the `values.yaml` file
 podAnnotations:
   prometheus.io/port: '8888'
   prometheus.io/scrape: 'true'
   clickhouse-operator-metrics/port: '9999'
   clickhouse-operator-metrics/scrape: 'true'
 # nameOverride -- override name of the chart
-
 nameOverride: ""
 # fullnameOverride -- full name of the chart.
-
 fullnameOverride: ""
 serviceAccount:
   # serviceAccount.create -- specifies whether a service account should be created
   create: true
   # serviceAccount.annotations -- annotations to add to the service account
-
   annotations: {}
   # serviceAccount.name -- the name of the service account to use; if not set and create is true, a name is generated using the fullname template
-
   name:
 rbac:
   # rbac.create -- specifies whether cluster roles and cluster role bindings should be created
@@ -97,35 +72,26 @@ secret:
   # secret.create -- create a secret with operator credentials
   create: true
   # secret.username -- operator credentials username
-
   username: clickhouse_operator
   # secret.password -- operator credentials password
-
   password: clickhouse_operator_password
-# nodeSelector -- node for scheduler pod assignment, look `kubectl explain pod.spec.nodeSelector` for details
-
+# nodeSelector -- node for scheduler pod assignment, check `kubectl explain pod.spec.nodeSelector` for details
 nodeSelector: {}
-# tolerations -- tolerations for scheduler pod assignment, look `kubectl explain pod.spec.tolerations` for details
-
+# tolerations -- tolerations for scheduler pod assignment, check `kubectl explain pod.spec.tolerations` for details
 tolerations: []
-# affinity -- affinity for scheduler pod assignment, look `kubectl explain pod.spec.affinity` for details
-
+# affinity -- affinity for scheduler pod assignment, check `kubectl explain pod.spec.affinity` for details
 affinity: {}
-# podSecurityContext - operator deployment SecurityContext, look `kubectl explain pod.spec.securityContext` for details
-
+# podSecurityContext - operator deployment SecurityContext, check `kubectl explain pod.spec.securityContext` for details
 podSecurityContext: {}
-# topologySpreadConstraints - topologySpreadConstraints affinity for scheduler pod assignment, look `kubectl explain pod.spec.topologySpreadConstraints` for details
-
+# topologySpreadConstraints - topologySpreadConstraints affinity for scheduler pod assignment, check `kubectl explain pod.spec.topologySpreadConstraints` for details
 topologySpreadConstraints: []
 serviceMonitor:
-  # serviceMonitor.enabled -- ServiceMonitor Custom resource is created for a (prometheus-operator)[https://github.com/prometheus-operator/prometheus-operator]
+  # serviceMonitor.enabled -- ServiceMonitor Custom resource is created for a [prometheus-operator](https://github.com/prometheus-operator/prometheus-operator)
   enabled: false
   # serviceMonitor.additionalLabels -- additional labels for service monitor
-
   additionalLabels: {}
-# configs -- clickhouse-operator configs
-
-# @default -- check the values.yaml file for the config content, auto-generated from latest operator release
+# configs -- clickhouse operator configs
+# @default -- check the `values.yaml` file for the config content (auto-generated from latest operator release)
 configs:
   confdFiles: null
   configdFiles:
@@ -782,68 +748,43 @@ configs:
     readme: |-
       Templates in this folder are packaged with an operator and available via 'useTemplate'
   keeperUsersdFiles: null
-# additionalResources -- list of additional resources to create (are processed via `tpl` function), useful for create ClickHouse clusters together with clickhouse-operator, look `kubectl explain chi` for details
-
+# additionalResources -- list of additional resources to create (processed via `tpl` function),
+# useful for create ClickHouse clusters together with clickhouse-operator.
+# check `kubectl explain chi` for details
 additionalResources: []
 # - |
-
 #   apiVersion: v1
-
 #   kind: ConfigMap
-
 #   metadata:
-
 #     name: {{ include "altinity-clickhouse-operator.fullname" . }}-cm
-
 #     namespace: {{ .Release.Namespace }}
-
 # - |
-
 #   apiVersion: v1
-
 #   kind: Secret
-
 #   metadata:
-
 #     name: {{ include "altinity-clickhouse-operator.fullname" . }}-s
-
 #     namespace: {{ .Release.Namespace }}
-
 #   stringData:
-
 #     mykey: my-value
-
 # - |
-
 #   apiVersion: clickhouse.altinity.com/v1
-
 #   kind: ClickHouseInstallation
-
 #   metadata:
-
 #     name: {{ include "altinity-clickhouse-operator.fullname" . }}-chi
-
 #     namespace: {{ .Release.Namespace }}
-
 #   spec:
-
 #     configuration:
-
 #       clusters:
-
 #         - name: default
-
 #           layout:
-
 #             shardsCount: 1
-
 dashboards:
   # dashboards.enabled -- provision grafana dashboards as configMaps (can be synced by grafana dashboards sidecar https://github.com/grafana/helm-charts/blob/grafana-8.3.4/charts/grafana/values.yaml#L778 )
   enabled: false
   # dashboards.additionalLabels -- labels to add to a secret with dashboards
-
   additionalLabels:
     grafana_dashboard: ""
   # dashboards.annotations -- annotations to add to a secret with dashboards
-
   annotations: {}
   grafana_folder: clickhouse
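For orientation (not part of this commit), a minimal sketch of overriding a few of the values documented above at install time; the file name and the values chosen here are illustrative:

```yaml
# my-values.yaml -- hypothetical override file for this chart
operator:
  image:
    tag: "0.24.1"             # pin the operator image instead of inheriting the chart appVersion
  env:
    - name: SAMPLE            # follows the documented format [{"name": "SAMPLE", "value": "text"}]
      value: text
serviceMonitor:
  enabled: true               # requires the prometheus-operator CRDs to be present in the cluster
dashboards:
  enabled: true
  additionalLabels:
    grafana_dashboard: "1"    # label that a grafana dashboards sidecar can watch for
```

Such a file would be applied with something like `helm upgrade --install clickhouse-operator <chart-reference> -f my-values.yaml` (release and chart names assumed).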
6 changes: 3 additions & 3 deletions dev/generate_helm_chart.sh
@@ -4,9 +4,9 @@ function usage() {
   cat << EOT
 Script splits clickhouse-operator-install-bundle.yaml to separate files and adjusts them to conform the helm standards
 NOTE script requires some pre-installed tools:
- - yq ( https://mikefarah.gitbook.io/yq/ ) > v4.14.x. Do not use brew install yq in MacOS,Version is lower than it.
+ - yq ( https://mikefarah.gitbook.io/yq/ ) > v4.14.x
  - jq ( https://github.com/stedolan/jq )
- - helm-docs ( https://github.com/norwoodj/helm-docs )
+ - helm-docs ( https://github.com/norwoodj/helm-docs ) > v1.14.x
  - perl ( https://learn.perl.org/installing/ )

 Usage: ./generate_helm_chart.sh
@@ -70,7 +70,7 @@ function main() {
   done

   if [[ $(command -v helm-docs) ]]; then
-    helm-docs --chart-search-root="${chart_path}" --log-level=warning
+    helm-docs --skip-version-footer --chart-search-root="${chart_path}" --log-level=warning
   else
     echo "WARNING"
     echo "helm-docs is not available, skip docs generation"
2 changes: 1 addition & 1 deletion pkg/announcer/announcer.go
@@ -351,7 +351,7 @@ func (a Announcer) tryToFindNamespaceNameEverywhere(m interface{}) (string, bool
     return "", false
 }

-// findInObjectMeta
+// findNamespaceName
 func (a Announcer) findNamespaceName(m interface{}) (string, bool) {
     if m == nil {
         return "", false
40 changes: 36 additions & 4 deletions pkg/controller/chk/worker-chk-reconciler.go
@@ -168,6 +168,21 @@ func (w *worker) reconcileCRAuxObjectsPreliminary(ctx context.Context, cr *apiCh
         w.a.F().Error("failed to reconcile config map users. err: %v", err)
     }

+    return w.reconcileCRAuxObjectsPreliminaryDomain(ctx, cr)
+}
+
+func (w *worker) reconcileCRAuxObjectsPreliminaryDomain(ctx context.Context, cr *apiChk.ClickHouseKeeperInstallation) error {
+    switch {
+    case cr.HostsCount() < cr.GetAncestor().HostsCount():
+        // Downscale
+        time.Sleep(120 * time.Second)
+    case cr.HostsCount() > cr.GetAncestor().HostsCount():
+        // Upscale
+        time.Sleep(30 * time.Second)
+    default:
+        // Same size
+        time.Sleep(10 * time.Second)
+    }
     return nil
 }

@@ -577,9 +592,9 @@ func (w *worker) reconcileHostMain(ctx context.Context, host *api.Host) error {
         reconcileStatefulSetOpts *statefulset.ReconcileOptions
     )

-    if !host.IsLast() {
-        reconcileStatefulSetOpts = reconcileStatefulSetOpts.SetDoNotWait()
-    }
+    //if !host.IsLast() {
+    //    reconcileStatefulSetOpts = reconcileStatefulSetOpts.SetDoNotWait()
+    //}

     if err := w.reconcileConfigMapHost(ctx, host); err != nil {
         w.a.V(1).
@@ -607,6 +622,8 @@ func (w *worker) reconcileHostMain(ctx context.Context, host *api.Host) error {
             Info("Data loss detected for host: %s. Will do force migrate", host.GetName())
     }

+    _ = w.reconcileHostService(ctx, host)
+
     if err := w.reconcileHostStatefulSet(ctx, host, reconcileStatefulSetOpts); err != nil {
         w.a.V(1).
             M(host).F().
@@ -620,8 +637,23 @@ func (w *worker) reconcileHostMain(ctx context.Context, host *api.Host) error {
         storage.NewStoragePVC(kube.NewPVC(w.c.Client)),
     ).ReconcilePVCs(ctx, host, api.DesiredStatefulSet)

-    _ = w.reconcileHostService(ctx, host)
+    // _ = w.reconcileHostService(ctx, host)

+    return w.reconcileHostMainDomain(ctx, host)
+}
+
+func (w *worker) reconcileHostMainDomain(ctx context.Context, host *api.Host) error {
+    // Should we wait for host to startup
+    wait := false
+
+    if host.GetReconcileAttributes().IsAdd() {
+        wait = true
+    }
+
+    // Wait for host to startup
+    if wait {
+        time.Sleep(7 * time.Second)
+    }
     return nil
 }
9 changes: 5 additions & 4 deletions pkg/controller/common/statefulset/statefulset-reconciler.go
@@ -334,7 +334,7 @@ func (r *Reconciler) createStatefulSet(ctx context.Context, host *api.Host, regi
 func (r *Reconciler) waitForConfigMapPropagation(ctx context.Context, host *api.Host) bool {
     // No need to wait for ConfigMap propagation on stopped host
     if host.IsStopped() {
-        r.a.V(1).M(host).F().Info("No need to wait for ConfigMap propagation - on stopped host")
+        r.a.V(1).M(host).F().Info("No need to wait for ConfigMap propagation - host is stopped")
         return false
     }

@@ -347,7 +347,7 @@ func (r *Reconciler) waitForConfigMapPropagation(ctx context.Context, host *api.
     // What timeout is expected to be enough for ConfigMap propagation?
     // In case timeout is not specified, no need to wait
     if !host.GetCR().GetReconciling().HasConfigMapPropagationTimeout() {
-        r.a.V(1).M(host).F().Info("No need to wait for ConfigMap propagation - not applicable")
+        r.a.V(1).M(host).F().Info("No need to wait for ConfigMap propagation - not applicable due to missing timeout value")
         return false
     }

@@ -357,18 +357,19 @@ func (r *Reconciler) waitForConfigMapPropagation(ctx context.Context, host *api.
     // May be there is no need to wait already
     elapsed := time.Now().Sub(r.task.CmUpdate())
     if elapsed >= timeout {
-        r.a.V(1).M(host).F().Info("No need to wait for ConfigMap propagation - already elapsed. %s/%s", elapsed, timeout)
+        r.a.V(1).M(host).F().Info("No need to wait for ConfigMap propagation - already elapsed. [elapsed/timeout: %s/%s]", elapsed, timeout)
         return false
     }

     // Looks like we need to wait for Configmap propagation, after all
     wait := timeout - elapsed
-    r.a.V(1).M(host).F().Info("Wait for ConfigMap propagation for %s %s/%s", wait, elapsed, timeout)
+    r.a.V(1).M(host).F().Info("Going to wait for ConfigMap propagation for: %s [elapsed/timeout: %s/%s]", wait, elapsed, timeout)
     if util.WaitContextDoneOrTimeout(ctx, wait) {
         log.V(2).Info("task is done")
         return true
     }

+    r.a.V(1).M(host).F().Info("Wait completed for: %s of timeout: %s]", wait, timeout)
     return false
 }
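The propagation wait above is driven entirely by a timeout on the custom resource: `HasConfigMapPropagationTimeout()` short-circuits when none is set. A minimal sketch of where that knob would live on a ClickHouseInstallation, assuming the `spec.reconciling.configMapPropagationTimeout` field used by the operator (the resource name and value here are illustrative):

```yaml
apiVersion: clickhouse.altinity.com/v1
kind: ClickHouseInstallation
metadata:
  name: demo                          # illustrative name
spec:
  reconciling:
    # seconds allowed for an updated ConfigMap to propagate to pods;
    # when unset, waitForConfigMapPropagation returns false immediately
    configMapPropagationTimeout: 90
  configuration:
    clusters:
      - name: default
        layout:
          shardsCount: 1
```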
14 changes: 8 additions & 6 deletions pkg/model/chk/creator/probe.go
@@ -36,7 +36,8 @@ func (m *ProbeManager) CreateProbe(what interfaces.ProbeType, host *api.Host) *c
     case interfaces.ProbeDefaultLiveness:
         return m.createDefaultLivenessProbe(host)
     case interfaces.ProbeDefaultReadiness:
-        return m.createDefaultReadinessProbe(host)
+        return nil
+        //return m.createDefaultReadinessProbe(host)
     }
     panic("unknown probe type")
 }
@@ -53,9 +54,9 @@ func (m *ProbeManager) createDefaultLivenessProbe(host *api.Host) *core.Probe {
                 },
             },
         },
-        InitialDelaySeconds: 60,
-        PeriodSeconds:       3,
-        FailureThreshold:    10,
+        InitialDelaySeconds: 5,
+        PeriodSeconds:       5,
+        FailureThreshold:    12,
     }
 }
@@ -77,7 +78,8 @@ func (m *ProbeManager) createDefaultReadinessProbe(host *api.Host) *core.Probe {
                 Port: intstr.Parse("9182"),
             },
         },
-        InitialDelaySeconds: 10,
-        PeriodSeconds:       3,
+        InitialDelaySeconds: 5,
+        PeriodSeconds:       5,
+        FailureThreshold:    12,
     }
 }
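With the default readiness probe disabled for keeper hosts (`CreateProbe` now returns nil for `ProbeDefaultReadiness`), a cluster that still wants one would have to define it explicitly. A hedged sketch via a pod template on the keeper resource; the container name, the endpoint path, and pod-template support are assumptions inferred from the removed default (which probed port 9182):

```yaml
apiVersion: clickhouse-keeper.altinity.com/v1
kind: ClickHouseKeeperInstallation
metadata:
  name: demo-keeper                   # illustrative name
spec:
  templates:
    podTemplates:
      - name: default
        spec:
          containers:
            - name: clickhouse-keeper     # container name assumed
              readinessProbe:
                httpGet:
                  path: /ready            # endpoint assumed from the removed default probe
                  port: 9182              # port taken from the diff above
                initialDelaySeconds: 5
                periodSeconds: 5
                failureThreshold: 12
```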