diff --git a/.github/workflows/check_helm.yaml b/.github/workflows/check_helm.yaml
index aa5f3248c..07b467ee0 100644
--- a/.github/workflows/check_helm.yaml
+++ b/.github/workflows/check_helm.yaml
@@ -21,7 +21,7 @@ jobs:
         with:
           driver: docker
           container-runtime: containerd
-          kubernetes-version: v1.31.0
+          kubernetes-version: v1.31.2
           cpus: max
           memory: max
diff --git a/.github/workflows/run_tests.yaml b/.github/workflows/run_tests.yaml
index bb52caa0f..039c94c37 100644
--- a/.github/workflows/run_tests.yaml
+++ b/.github/workflows/run_tests.yaml
@@ -47,7 +47,7 @@ jobs:
         with:
           driver: docker
           container-runtime: containerd
-          kubernetes-version: v1.31.0
+          kubernetes-version: v1.31.2
           cpus: max
           memory: max
@@ -66,6 +66,15 @@ jobs:
           minikube image load operator.tar
           minikube image load metrics-exporter.tar
 
+      - name: Deploy prometheus
+        run: |
+          cp ./deploy/prometheus/prometheus-sensitive-data.example.sh ./deploy/prometheus/prometheus-sensitive-data.sh
+          NO_WAIT=1 bash ./deploy/prometheus/create-prometheus.sh
+
+      - name: Deploy minio
+        run: |
+          NO_WAIT=1 bash ./deploy/minio/create-minio.sh
+
       - name: Run Tests
         id: run-tests
         continue-on-error: true
@@ -93,12 +102,22 @@ jobs:
             test_mode="--test-to-end"
           fi
 
-          ~/venv/qa/bin/python3 ./tests/regression.py --only=/regression/e2e.test_operator/${ONLY} $test_mode --trim-results on -o short --native --log ./tests/raw.log
+          for test_file in ./tests/e2e/test_operator*.py; do
+            name=$(basename "$test_file" .py | sed 's/^test_//')
+            run_cmd="~/venv/qa/bin/python3 ./tests/regression.py --only=/regression/e2e?test_${name}/${ONLY} $test_mode --trim-results on -o short --native --log ./tests/raw_${name}.log && "
+            run_cmd+="~/venv/qa/bin/tfs --no-colors transform compact ./tests/raw_${name}.log ./tests/compact_${name}.log.txt && "
+            run_cmd+="~/venv/qa/bin/tfs --no-colors transform nice ./tests/raw_${name}.log ./tests/nice_${name}.log.txt && "
+            run_cmd+="~/venv/qa/bin/tfs --no-colors transform short ./tests/raw_${name}.log ./tests/short_${name}.log.txt && "
+            run_cmd+="bash -xec '~/venv/qa/bin/tfs --no-colors report results -a '${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}/' ./tests/raw_${name}.log - --confidential --copyright 'Altinity Inc.' --logo ./tests/altinity.png | ~/venv/qa/bin/tfs --debug --no-colors document convert > ./tests/report_${name}.html'"
+
+            run_tests+=(
+              "${run_cmd}"
+            )
+          done
+          printf "%s\n" "${run_tests[@]}" | xargs -P 2 -I {} bash -xec '{}'
+
+          ls -la ./tests/*.html
           test_result=$?
-          ~/venv/qa/bin/tfs --no-colors transform compact ./tests/raw.log ./tests/compact.log
-          ~/venv/qa/bin/tfs --no-colors transform nice ./tests/raw.log ./tests/nice.log.txt
-          ~/venv/qa/bin/tfs --no-colors transform short ./tests/raw.log ./tests/short.log.txt
-          ~/venv/qa/bin/tfs --no-colors report results -a "${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}/" ./tests/raw.log - --confidential --copyright "Altinity Inc." --logo ./tests/altinity.png | ~/venv/qa/bin/tfs --debug --no-colors document convert > ./tests/report.html
 
           echo "test_result=$test_result" >> $GITHUB_OUTPUT
           exit "$test_result"
@@ -116,7 +135,7 @@ jobs:
         with:
           name: testflows-report
           path: |
-            tests/report.html
+            tests/*.html
           if-no-files-found: error
           retention-days: 90
diff --git a/.gitignore b/.gitignore
index 3e2b68c7a..e2dc59a9e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -42,6 +42,8 @@ venv
 
 # Tests cached files
 tests/image/cache
+tests/*.log.txt
+tests/*.html
 
 # Skip tmp folder
 /tmp/
diff --git a/Vagrantfile b/Vagrantfile
index a5e28b618..3570b934b 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -121,15 +121,13 @@ Vagrant.configure(2) do |config|
       # docker
       curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
       add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -cs) test"
-      apt-get install --no-install-recommends -y docker-ce pigz
+      apt-get install --no-install-recommends -y docker-ce docker-ce-cli containerd.io docker-compose-plugin pigz
 
-      # docker compose
       apt-get install -y --no-install-recommends python3-distutils
       curl -sL https://bootstrap.pypa.io/get-pip.py -o /tmp/get-pip.py
       python3 /tmp/get-pip.py
-      pip3 install -U setuptools
-      pip3 install -U docker-compose
+      pip3 install -U -r ./tests/image/requirements.txt
 
       # k9s CLI
       K9S_VERSION=$(curl -sL https://github.com/derailed/k9s/releases/latest -H "Accept: application/json" | jq -r .tag_name)
@@ -170,7 +168,8 @@ Vagrant.configure(2) do |config|
 #      K8S_VERSION=${K8S_VERSION:-1.23.1}
 #      K8S_VERSION=${K8S_VERSION:-1.24.8}
 #      K8S_VERSION=${K8S_VERSION:-1.25.4}
-      K8S_VERSION=${K8S_VERSION:-1.31.1}
+#      K8S_VERSION=${K8S_VERSION:-1.31.1}
+      K8S_VERSION=${K8S_VERSION:-1.31.2}
 
       export VALIDATE_YAML=true
       killall kubectl || true
diff --git a/config/chi/templates.d/001-templates.json.example b/config/chi/templates.d/001-templates.json.example
index 34468ecd2..bcab13d1a 100644
--- a/config/chi/templates.d/001-templates.json.example
+++ b/config/chi/templates.d/001-templates.json.example
@@ -29,7 +29,7 @@
           "containers" : [
             {
               "name": "clickhouse",
-              "image": "clickhouse/clickhouse-server:23.8",
+              "image": "clickhouse/clickhouse-server:24.8",
               "ports": [
                 {
                   "name": "http",
diff --git a/deploy/builder/templates-config/chi/templates.d/001-templates.json.example b/deploy/builder/templates-config/chi/templates.d/001-templates.json.example
index 34468ecd2..bcab13d1a 100644
--- a/deploy/builder/templates-config/chi/templates.d/001-templates.json.example
+++ b/deploy/builder/templates-config/chi/templates.d/001-templates.json.example
@@ -29,7 +29,7 @@
           "containers" : [
             {
               "name": "clickhouse",
-              "image": "clickhouse/clickhouse-server:23.8",
+              "image": "clickhouse/clickhouse-server:24.8",
               "ports": [
                 {
                   "name": "http",
diff --git a/deploy/clickhouse-keeper/clickhouse-keeper-manually/clickhouse-keeper-1-node-256M-for-test-only.yaml b/deploy/clickhouse-keeper/clickhouse-keeper-manually/clickhouse-keeper-1-node-256M-for-test-only.yaml
index 8f8b1aa0c..3bfdb206d 100644
--- a/deploy/clickhouse-keeper/clickhouse-keeper-manually/clickhouse-keeper-1-node-256M-for-test-only.yaml
+++ b/deploy/clickhouse-keeper/clickhouse-keeper-manually/clickhouse-keeper-1-node-256M-for-test-only.yaml
@@ -378,13 +378,13 @@ spec:
       containers:
         - name: clickhouse-keeper
           imagePullPolicy: Always
-          image: "clickhouse/clickhouse-keeper:latest-alpine"
+          image: "clickhouse/clickhouse-keeper:24.8"
           resources:
             requests:
               memory: "256M"
               cpu: "1"
             limits:
-              memory: "4Gi"
+              memory: "1Gi"
               cpu: "2"
           volumeMounts:
             - name: clickhouse-keeper-settings
diff --git a/deploy/clickhouse-keeper/clickhouse-keeper-manually/clickhouse-keeper-3-nodes-256M-for-test-only.yaml b/deploy/clickhouse-keeper/clickhouse-keeper-manually/clickhouse-keeper-3-nodes-256M-for-test-only.yaml
index 38e2ae7e6..3d2a62465 100644
--- a/deploy/clickhouse-keeper/clickhouse-keeper-manually/clickhouse-keeper-3-nodes-256M-for-test-only.yaml
+++ b/deploy/clickhouse-keeper/clickhouse-keeper-manually/clickhouse-keeper-3-nodes-256M-for-test-only.yaml
@@ -378,13 +378,13 @@ spec:
       containers:
        - name: clickhouse-keeper
          imagePullPolicy: Always
-          image: "clickhouse/clickhouse-keeper:latest-alpine"
+          image: "clickhouse/clickhouse-keeper:24.8"
          resources:
            requests:
              memory: "256M"
              cpu: "1"
            limits:
-              memory: "4Gi"
+              memory: "1Gi"
              cpu: "2"
          volumeMounts:
            - name: clickhouse-keeper-settings
diff --git a/deploy/helm/clickhouse-operator/values.yaml b/deploy/helm/clickhouse-operator/values.yaml
index 4eb72790b..dac0f236f 100644
--- a/deploy/helm/clickhouse-operator/values.yaml
+++ b/deploy/helm/clickhouse-operator/values.yaml
@@ -584,7 +584,7 @@ configs:
             "containers" : [
               {
                 "name": "clickhouse",
-                "image": "clickhouse/clickhouse-server:23.8",
+                "image": "clickhouse/clickhouse-server:24.8",
                 "ports": [
                   {
                     "name": "http",
diff --git a/deploy/minio/install-minio-operator.sh b/deploy/minio/install-minio-operator.sh
index 408378eaf..0f6158fbc 100755
--- a/deploy/minio/install-minio-operator.sh
+++ b/deploy/minio/install-minio-operator.sh
@@ -4,7 +4,7 @@ echo "External value for \$MINIO_NAMESPACE=$MINIO_NAMESPACE"
 echo "External value for \$MINIO_OPERATOR_VERSION=$MINIO_OPERATOR_VERSION"
 
 MINIO_NAMESPACE="${MINIO_NAMESPACE:-minio}"
-MINIO_OPERATOR_VERSION="${MINIO_OPERATOR_VERSION:-v4.1.3}"
+MINIO_OPERATOR_VERSION="${MINIO_OPERATOR_VERSION:-v6.0.4}"
 
 echo "Setup minio.io"
 echo "OPTIONS"
@@ -63,7 +63,9 @@ echo "Setup minio.io operator ${MINIO_OPERATOR_VERSION} into ${MINIO_NAMESPACE}
 ## TODO: need to refactor after next minio-operator release
 MINIO_KUSTOMIZE_DIR="${MINIO_OPERATOR_DIR}/resources"
 
+sed -i -e "s/replicas: 2/replicas: 1/" $MINIO_KUSTOMIZE_DIR/base/deployment.yaml
 sed -i -e "s/name: minio-operator/name: ${MINIO_NAMESPACE}/" $MINIO_KUSTOMIZE_DIR/base/namespace.yaml
+sed -i -e "s/: restricted/: baseline/" $MINIO_KUSTOMIZE_DIR/base/namespace.yaml
 sed -i -e "s/namespace: default/namespace: ${MINIO_NAMESPACE}/" $MINIO_KUSTOMIZE_DIR/base/*.yaml
 sed -i -e "s/namespace: minio-operator/namespace: ${MINIO_NAMESPACE}/" $MINIO_KUSTOMIZE_DIR/base/*.yaml
 sed -i -e "s/namespace: minio-operator/namespace: ${MINIO_NAMESPACE}/" $MINIO_KUSTOMIZE_DIR/kustomization.yaml
diff --git a/deploy/minio/install-minio-tenant.sh b/deploy/minio/install-minio-tenant.sh
index 64c19759a..0dd86be0f 100755
--- a/deploy/minio/install-minio-tenant.sh
+++ b/deploy/minio/install-minio-tenant.sh
@@ -1,29 +1,14 @@
 export MINIO_BACKUP_BUCKET=${MINIO_BACKUP_BUCKET:-clickhouse-backup}
 export MINIO_NAMESPACE="${MINIO_NAMESPACE:-minio}"
-# look to https://github.com/minio/operator/blob/v4.1.3/examples/tenant.yaml
-export MINIO_VERSION="${MINIO_VERSION:-RELEASE.2021-06-17T00-10-46Z}"
-# export MINIO_VERSION="${MINIO_VERSION:-latest}"
-export MINIO_CLIENT_VERSION="${MINIO_CLIENT_VERSION:-latest}"
-export MINIO_CONSOLE_VERSION="${MINIO_CONSOLE_VERSION:-latest}"
+# look at https://github.com/minio/operator/blob/master/examples/kustomization/base/tenant.yaml
+export MINIO_VERSION="${MINIO_VERSION:-RELEASE.2024-10-02T17-50-41Z}"
+export MINIO_CLIENT_VERSION="${MINIO_CLIENT_VERSION:-RELEASE.2024-10-29T15-34-59Z}"
 
 export MINIO_ACCESS_KEY="${MINIO_ACCESS_KEY:-minio-access-key}"
-export MINIO_ACCESS_KEY_B64=$(echo -n "$MINIO_ACCESS_KEY" | base64)
-
 export MINIO_SECRET_KEY="${MINIO_SECRET_KEY:-minio-secret-key}"
-export MINIO_SECRET_KEY_B64=$(echo -n "$MINIO_SECRET_KEY" | base64)
-
-export MINIO_CONSOLE_PBKDF_PASSPHRASE="${MINIO_CONSOLE_PBKDF_PASSPHRASE}:-SECRET"
-export MINIO_CONSOLE_PBKDF_PASSPHRASE_B64=$(echo -n "${MINIO_CONSOLE_PBKDF_PASSPHRASE}" | base64)
-
-export MINIO_CONSOLE_PBKDF_SALT="${MINIO_CONSOLE_PBKDF_SALT}:-SECRET"
-export MINIO_CONSOLE_PBKDF_SALT_B64=$(echo -n "${MINIO_CONSOLE_PBKDF_SALT}" | base64)
-
 export MINIO_CONSOLE_ACCESS_KEY="${MINIO_CONSOLE_ACCESS_KEY:-minio_console}"
-export MINIO_CONSOLE_ACCESS_KEY_B64=$(echo -n "${MINIO_CONSOLE_ACCESS_KEY}" | base64)
-
 export MINIO_CONSOLE_SECRET_KEY="${MINIO_CONSOLE_SECRET_KEY:-minio_console}"
-export MINIO_CONSOLE_SECRET_KEY_B64=$(echo -n "${MINIO_CONSOLE_SECRET_KEY}" | base64)
@@ -36,8 +21,7 @@ export MINIO_CONSOLE_SECRET_KEY_B64=$(echo -n "${MINIO_CONSOLE_SECRET_KEY}" | ba
 
 function wait_minio_to_start() {
     # Fetch Minio's deployment_name and namespace from params
     local namespace=$1
-    local deployment_name=$2
-    local pod_name=$3
+    local pod_name=$2
 
     echo -n "Waiting Minio pod '${namespace}/${pod_name}' to start"
     # Check minio tenant has all pods ready
@@ -47,13 +31,6 @@ function wait_minio_to_start() {
     done
     echo "...DONE"
 
-    echo -n "Waiting Minio Console Deployment '${namespace}/${deployment_name}' to start"
-    # Check minio-console deployment have all pods ready
-    while [[ $(kubectl --namespace="${namespace}" get deployments | grep "${deployment_name}" | grep -c "1/1") == "0" ]]; do
-        printf "."
-        sleep 1
-    done
-    echo "...DONE"
 }
@@ -78,7 +55,7 @@ kubectl apply -n "${MINIO_NAMESPACE}" -f <(
   envsubst < "$CUR_DIR/minio-tenant-template.yaml"
 )
 
-wait_minio_to_start "$MINIO_NAMESPACE" minio-console minio-pool-0-0
+wait_minio_to_start "$MINIO_NAMESPACE" minio-pool-0-0
 
 kubectl apply -n "${MINIO_NAMESPACE}" -f <(
   envsubst < "$CUR_DIR/minio-tenant-create-bucket-template.yaml"
diff --git a/deploy/minio/minio-tenant-create-bucket-template.yaml b/deploy/minio/minio-tenant-create-bucket-template.yaml
index 11f7f6921..fa877edc8 100644
--- a/deploy/minio/minio-tenant-create-bucket-template.yaml
+++ b/deploy/minio/minio-tenant-create-bucket-template.yaml
@@ -9,11 +9,10 @@ spec:
       restartPolicy: Never
       containers:
         - name: minio-client
-          image: "minio/mc:${MINIO_CLIENT_VERSION}"
+          image: "quay.io/minio/mc:${MINIO_CLIENT_VERSION}"
           imagePullPolicy: IfNotPresent
           command:
             - /bin/bash
             - -xc
-            - mc alias set miniok8s https://minio-hl.minio:9000 ${MINIO_ACCESS_KEY} ${MINIO_SECRET_KEY} --insecure && mc mb miniok8s/${MINIO_BACKUP_BUCKET} --ignore-existing --insecure
-
+            - mc alias set miniok8s http://minio-hl.${MINIO_NAMESPACE}:9000 ${MINIO_ACCESS_KEY} ${MINIO_SECRET_KEY} && mc mb miniok8s/${MINIO_BACKUP_BUCKET} --ignore-existing --insecure
   backoffLimit: 10
diff --git a/deploy/minio/minio-tenant-template.yaml b/deploy/minio/minio-tenant-template.yaml
index 99cf9ed06..07e7fe9f7 100644
--- a/deploy/minio/minio-tenant-template.yaml
+++ b/deploy/minio/minio-tenant-template.yaml
@@ -2,25 +2,21 @@
 apiVersion: v1
 kind: Secret
 metadata:
-  name: minio-creds-secret
+  name: minio-configuration
 type: Opaque
-data:
-  ## Access Key for MinIO Tenant, base64 encoded (echo -n 'minio' | base64)
-  accesskey: "$MINIO_ACCESS_KEY_B64"
-  ## Secret Key for MinIO Tenant, base64 encoded (echo -n 'minio123' | base64)
-  secretkey: "$MINIO_SECRET_KEY_B64"
+stringData:
+  config.env: |-
+    export MINIO_ROOT_USER="$MINIO_ACCESS_KEY"
+    export MINIO_ROOT_PASSWORD="$MINIO_SECRET_KEY"
+    export MINIO_BROWSER="on"
 ---
 ## Secret to be used for MinIO Console
 apiVersion: v1
 kind: Secret
 metadata:
-  name: minio-console-secret
+  name: console-user
 type: Opaque
 data:
-  ## Passphrase to encrypt jwt payload, base64 encoded (echo -n 'SECRET' | base64)
-  CONSOLE_PBKDF_PASSPHRASE: "$MINIO_CONSOLE_PBKDF_PASSPHRASE_B64"
-  ## Salt to encrypt jwt payload, base64 encoded (echo -n 'SECRET' | base64)
-  CONSOLE_PBKDF_SALT: "$MINIO_CONSOLE_PBKDF_SALT_B64"
   ## MinIO User Access Key (used for Console Login), base64 encoded (echo -n 'YOURCONSOLEACCESS' | base64)
   CONSOLE_ACCESS_KEY: "$MINIO_CONSOLE_ACCESS_KEY_B64"
   ## MinIO User Secret Key (used for Console Login), base64 encoded (echo -n 'YOURCONSOLESECRET' | base64)
@@ -40,181 +36,204 @@ metadata:
     prometheus.io/port: "9000"
     prometheus.io/scrape: "true"
 
-## If a scheduler is specified here, Tenant pods will be dispatched by specified scheduler.
-## If not specified, the Tenant pods will be dispatched by default scheduler.
-# scheduler:
-#  name: my-custom-scheduler
-
 spec:
+  features:
+    ## Enable S3 specific features such as Bucket DNS which would allow `buckets` to be
+    ## accessible as DNS entries of form `.minio.namespace.svc.cluster.local`
+    ## This feature is turned off by default
+    bucketDNS: false
+    ## Specify a list of domains used to access MinIO and Console
+    domains: { }
+    ## Enable access via SFTP
+    ## This feature is turned off by default
+    enableSFTP: false
+  ## Create users in the Tenant using this field. Make sure to create secrets per user added here.
+  ## Secret should follow the format used in `minio-creds-secret`.
+  users:
+    - name: console-user
+  ## Create buckets using the console user
+  buckets: []
+  # - name: "${MINIO_BACKUP_BUCKET}"
+  #   region: "us-east-1"
+  #   objectLock: true
+  ## This field is used only when "requestAutoCert" is set to true. Use this field to set CommonName
+  ## for the auto-generated certificate. Internal DNS name for the pod will be used if CommonName is
+  ## not provided. DNS name format is *.minio.default.svc.cluster.local
+  certConfig: { }
+  ## PodManagement policy for MinIO Tenant Pods. Can be "OrderedReady" or "Parallel"
+  ## Refer https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy
+  ## for details.
+  podManagementPolicy: Parallel
+  ## Secret with credentials and configurations to be used by MinIO Tenant.
+  configuration:
+    name: minio-configuration
+  ## Add environment variables to be set in MinIO container (https://github.com/minio/minio/tree/master/docs/config)
+  env: [ ]
+  ## serviceMetadata allows passing additional labels and annotations to MinIO and Console specific
+  ## services created by the operator.
+  serviceMetadata:
+    minioServiceLabels: { }
+    minioServiceAnnotations: { }
+    consoleServiceLabels: { }
+    consoleServiceAnnotations: { }
+  ## PriorityClassName indicates the Pod priority and hence importance of a Pod relative to other Pods.
+  ## This is applied to MinIO pods only.
+  ## Refer Kubernetes documentation for details https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass/
+  priorityClassName: ""
+  ## Use this field to provide one or more external CA certificates. This is used by MinIO
+  ## to verify TLS connections with other applications.
+  ## Certificate secret files will be mounted under /tmp/certs/CAs folder, supported types:
+  ## Opaque | kubernetes.io/tls | cert-manager.io/v1alpha2 | cert-manager.io/v1
+  ##
+  ## ie:
+  ##
+  ## externalCaCertSecret:
+  ##   - name: ca-certificate-1
+  ##     type: Opaque
+  ##   - name: ca-certificate-2
+  ##     type: Opaque
+  ##   - name: ca-certificate-3
+  ##     type: Opaque
+  ##
+  ## Create secrets as explained here:
+  ## https://github.com/minio/minio/tree/master/docs/tls/kubernetes#2-create-kubernetes-secret
+  externalCaCertSecret: [ ]
+  ## Use this field to provide one or more Secrets with external certificates. This can be used to configure
+  ## TLS for MinIO Tenant pods.
+  ## Certificate secret files will be mounted under /tmp/certs folder, supported types:
+  ## Opaque | kubernetes.io/tls | cert-manager.io/v1alpha2 | cert-manager.io/v1
+  ##
+  ## ie:
+  ##
+  ## externalCertSecret:
+  ##   - name: domain-certificate-1
+  ##     type: kubernetes.io/tls
+  ##   - name: domain-certificate-2
+  ##     type: kubernetes.io/tls
+  ##   - name: domain-certificate-3
+  ##     type: kubernetes.io/tls
+  ##
+  ## Create secrets as explained here:
+  ## https://github.com/minio/minio/tree/master/docs/tls/kubernetes#2-create-kubernetes-secret
+  externalCertSecret: [ ]
+  ## Use this field to provide client certificates for MinIO & KES. This can be used to configure
+  ## mTLS for MinIO and your KES server. Files will be mounted under /tmp/certs folder, supported types:
+  ## Opaque | kubernetes.io/tls | cert-manager.io/v1alpha2 | cert-manager.io/v1
+  ## ie:
+  ##
+  ## externalClientCertSecret:
+  ##   name: mtls-certificates-for-tenant
+  ##   type: Opaque
+  ##
+  ## Create secrets as explained here:
+  ## https://github.com/minio/minio/tree/master/docs/tls/kubernetes#2-create-kubernetes-secret
+  # externalClientCertSecret: {}
+  ##
+  ## Use this field to provide additional client certificate for the MinIO Tenant
+  ## Certificate secret files will be mounted under /tmp/certs folder, supported types:
+  ## Opaque | kubernetes.io/tls | cert-manager.io/v1alpha2 | cert-manager.io/v1
+  ##
+  ## mount path inside container:
+  ##
+  ##   certs
+  ##     |
+  ##     + client-0
+  ##     |    + client.crt
+  ##     |    + client.key
+  ##     + client-1
+  ##     |    + client.crt
+  ##     |    + client.key
+  ##     + client-2
+  ##     |    + client.crt
+  ##     |    + client.key
+  ## ie:
+  ##
+  ## externalClientCertSecrets:
+  ##   - name: client-certificate-1
+  ##     type: kubernetes.io/tls
+  ##   - name: client-certificate-2
+  ##     type: kubernetes.io/tls
+  ##   - name: client-certificate-3
+  ##     type: kubernetes.io/tls
+  ##
+  ## Create secrets as explained here:
+  ## https://github.com/minio/minio/tree/master/docs/tls/kubernetes#2-create-kubernetes-secret
+  externalClientCertSecrets: [ ]
   ## Registry location and Tag to download MinIO Server image
-  image: minio/minio:${MINIO_VERSION}
-  imagePullPolicy: IfNotPresent
-
-  ## Secret with credentials to be used by MinIO Tenant.
-  ## Refers to the secret object created above.
-  credsSecret:
-    name: minio-creds-secret
-
-  ## prometheus endpoint
-  env:
-    - name: MINIO_PROMETHEUS_AUTH_TYPE
-      value: public
-
-
+  image: quay.io/minio/minio:${MINIO_VERSION}
+  imagePullSecret: { }
+  ## Mount path where PV will be mounted inside container(s).
+  mountPath: /export
+  ## Sub path inside Mount path where MinIO stores data.
+  ## WARNING:
+  ## We recommend you to keep the same mountPath and the same subPath once the
+  ## Tenant has been deployed over your different PVs.
+  ## This is because if you change these values once Tenant is deployed, then
+  ## you will end up with multiple paths for different buckets. So please, be
+  ## very careful to keep same value for the life of the Tenant.
+  subPath: ""
+  ## Service account to be used for all the MinIO Pods
+  serviceAccountName: ""
   ## Specification for MinIO Pool(s) in this Tenant.
   pools:
-  ## Servers specifies the number of MinIO Tenant Pods / Servers in this pool.
-  ## For standalone mode, supply 1. For distributed mode, supply 4 or more.
-  ## Note that the operator does not support upgrading from standalone to distributed mode.
+      ## Servers specifies the number of MinIO Tenant Pods / Servers in this pool.
+      ## For standalone mode, supply 1. For distributed mode, supply 4 or more.
+      ## Note that the operator does not support upgrading from standalone to distributed mode.
     - servers: 1
-      affinity:
-        podAntiAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            - labelSelector:
-                matchExpressions:
-                  - key: v1.min.io/tenant
-                    operator: In
-                    values:
-                      - minio
-                  - key: v1.min.io/pool
-                    operator: In
-                    values:
-                      - pool-0
-              topologyKey: kubernetes.io/hostname
+      ## custom name for the pool
       name: pool-0
+      ## Specify one or more Pod Topology Spread Constraints to apply to pods deployed in the MinIO pool.
+      ## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints
+      topologySpreadConstraints: [ ]
       ## volumesPerServer specifies the number of volumes attached per MinIO Tenant Pod / Server.
-      volumesPerServer: 4
-
+      volumesPerServer: 1
+      ## nodeSelector parameters for MinIO Pods. It specifies a map of key-value pairs. For the pod to be
+      ## eligible to run on a node, the node must have each of the
+      ## indicated key-value pairs as labels.
+      ## Read more here: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+      nodeSelector: { }
+      ## Used to specify a toleration for a pod
+      tolerations: [ ]
+      ## Affinity settings for MinIO pods. Read more about affinity
+      ## here: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity.
+      affinity:
+        nodeAffinity: { }
+        podAffinity: { }
+        podAntiAffinity: { }
+      ## Configure resource requests and limits for MinIO containers
+      resources: { }
       ## This VolumeClaimTemplate is used across all the volumes provisioned for MinIO Tenant in this
       ## Pool.
       volumeClaimTemplate:
-        metadata:
-          name: data
+        apiVersion: v1
+        kind: persistentvolumeclaims
         spec:
           accessModes:
             - ReadWriteOnce
           resources:
             requests:
-              storage: 10Gi
-
-      ## Used to specify a toleration for a pod
-      # tolerations:
-      #  - effect: NoSchedule
-      #    key: dedicated
-      #    operator: Equal
-      #    value: storage
-
-      ## nodeSelector parameters for MinIO Pods. It specifies a map of key-value pairs. For the pod to be
-      ## eligible to run on a node, the node must have each of the
-      ## indicated key-value pairs as labels.
-      ## Read more here: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
-      # nodeSelector:
-      #   disktype: ssd
-
-      ## Affinity settings for MinIO pods. Read more about affinity
-      ## here: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity.
-      # affinity:
-      #   nodeAffinity:
-      #     requiredDuringSchedulingIgnoredDuringExecution:
-      #       nodeSelectorTerms:
-      #         - matchExpressions:
-      #             - key: kubernetes.io/hostname
-      #               operator: In
-      #               values:
-      #                 - hostname1
-      #                 - hostname2
-      #   podAntiAffinity:
-      #     requiredDuringSchedulingIgnoredDuringExecution:
-      #       - labelSelector:
-      #           matchExpressions:
-      #             - key: app
-      #               operator: In
-      #               values:
-      #                 - store
-      #           topologyKey: "kubernetes.io/hostname"
-
-      ## Configure resource requests and limits for MinIO containers
-      # resources:
-      #   requests:
-      #     cpu: 250m
-      #     memory: 16Gi
-      #   limits:
-      #     cpu: 500m
-      #     memory: 16Gi
-
-      ## Mount path where PV will be mounted inside container(s).
-      mountPath: /export
-      ## Sub path inside Mount path where MinIO stores data.
-      # subPath: /data
-
-  ## Use this field to provide a list of Secrets with external certificates. This can be used to to configure
-  ## TLS for MinIO Tenant pods. Create secrets as explained here:
-  ## https://github.com/minio/minio/tree/master/docs/tls/kubernetes#2-create-kubernetes-secret
-  # externalCertSecret:
-  #   - name: tls-ssl-minio
-  #     type: kubernetes.io/tls
-
+              storage: 1Gi
+      ## Configure Pod's security context
+      ## We recommend to skip the recursive permission change by using
+      ## fsGroupChangePolicy as OnRootMismatch because it can be pretty
+      ## expensive for larger volumes with lots of small files.
+      securityContext:
+        runAsUser: 1000
+        runAsGroup: 1000
+        runAsNonRoot: true
+        fsGroup: 1000
+        fsGroupChangePolicy: "OnRootMismatch"
+      ## Configure container security context
+      containerSecurityContext:
+        runAsUser: 1000
+        runAsGroup: 1000
+        runAsNonRoot: true
+        allowPrivilegeEscalation: false
+        capabilities:
+          drop:
+            - ALL
+        seccompProfile:
+          type: RuntimeDefault
   ## Enable automatic Kubernetes based certificate generation and signing as explained in
   ## https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster
-  requestAutoCert: true
-
-  ## Enable S3 specific features such as Bucket DNS which would allow `buckets` to be
-  ## accessible as DNS entries of form `.minio..svc.cluster.local`
-  s3:
-    ## This feature is turned off by default
-    bucketDNS: true
-
-  ## This field is used only when "requestAutoCert" is set to true. Use this field to set CommonName
-  ## for the auto-generated certificate. Internal DNS name for the pod will be used if CommonName is
-  ## not provided. DNS name format is *.minio.default.svc.cluster.local
-# certConfig:
-#   commonName: ""
-#   organizationName: []
-#   dnsNames: []
-
-  ## PodManagement policy for MinIO Tenant Pods. Can be "OrderedReady" or "Parallel"
-  ## Refer https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy
-  ## for details.
-  podManagementPolicy: Parallel
-
-  ## serviceMetadata allows passing additional labels and annotations to MinIO and Console specific
-  ## services created by the operator.
-  serviceMetadata:
-    minioServiceLabels:
-      label: minio-svc
-    minioServiceAnnotations:
-      v2.min.io: minio-svc
-    consoleServiceLabels:
-      label: console-svc
-    consoleServiceAnnotations:
-      v2.min.io: console-svc
-
-  ## Add environment variables to be set in MinIO container (https://github.com/minio/minio/tree/master/docs/config)
-  # env:
-  #   - name: MINIO_BROWSER
-  #     value: "off" # to turn-off browser
-  #   - name: MINIO_STORAGE_CLASS_STANDARD
-  #     value: "EC:2"
-  #   ## For secure env vars like passwords, create an opaque Kubernetes secret and specify the secret in
-  #   ## the `valueFrom` field. The `valueFrom` object must contain the following fields:
-  #   ## `name` - the secret from which MinIO extracts the password, `key` - the data field
-  #   ## within secret, whose value will be set to the env variable's value
-  #   - name: MINIO_IDENTITY_LDAP_LOOKUP_BIND_PASSWORD
-  #     valueFrom:
-  #       secretKeyRef:
-  #         name: ldap-minio-secret
-  #         key: MINIO_IDENTITY_LDAP_LOOKUP_BIND_PASSWORD
-
-  ## PriorityClassName indicates the Pod priority and hence importance of a Pod relative to other Pods.
-  ## This is applied to MinIO pods only.
-  ## Refer Kubernetes documentation for details https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass/
-  # priorityClassName: high-priority
-
-  ## Define configuration for Console (Graphical user interface for MinIO)
-  ## Refer https://github.com/minio/console
-  console:
-    image: minio/console:${MINIO_CONSOLE_VERSION}
-    imagePullPolicy: IfNotPresent
-    replicas: 1
-    consoleSecret:
-      name: minio-console-secret
+  requestAutoCert: false
\ No newline at end of file
diff --git a/deploy/openebs/lvm/clickhouse-installation-with-openebs.yaml b/deploy/openebs/lvm/clickhouse-installation-with-openebs.yaml
index d477fa686..467849cce 100644
--- a/deploy/openebs/lvm/clickhouse-installation-with-openebs.yaml
+++ b/deploy/openebs/lvm/clickhouse-installation-with-openebs.yaml
@@ -12,14 +12,14 @@ spec:
   defaults:
     templates:
       dataVolumeClaimTemplate: openebs
-      podTemplate: clickhouse:24.3
+      podTemplate: clickhouse-template
 
   templates:
     podTemplates:
       - metadata:
-          name: clickhouse:24.3
+          name: clickhouse-template
         spec:
           containers:
-            - image: clickhouse/clickhouse-server:24.3
+            - image: clickhouse/clickhouse-server:latest
               name: clickhouse
               ports:
                 - containerPort: 8123
diff --git a/deploy/openebs/lvm/install-openebs-lvm.sh b/deploy/openebs/lvm/install-openebs-lvm.sh
index 702507b0f..82b51511d 100755
--- a/deploy/openebs/lvm/install-openebs-lvm.sh
+++ b/deploy/openebs/lvm/install-openebs-lvm.sh
@@ -12,19 +12,19 @@ function ensure_namespace() {
 
 echo "External value for \$OPENEBS_NAMESPACE=$OPENEBS_NAMESPACE"
-echo "External value for \$OPENEBS_OPERATOR_VERSION=$OPENEBS_OPERATOR_VERSION"
+echo "External value for \$OPENEBS_HELM_VERSION=$OPENEBS_HELM_VERSION"
 echo "External value for \$VALIDATE_YAML=$VALIDATE_YAML"
 echo "External value for \$CLICKHOUSE_NAMESPACE=$CLICKHOUSE_NAMESPACE"
 
 OPENEBS_NAMESPACE="${OPENEBS_NAMESPACE:-openebs}"
-OPENEBS_OPERATOR_VERSION="${OPENEBS_OPERATOR_VERSION:-v4.1.3}"
+OPENEBS_HELM_VERSION="${OPENEBS_HELM_VERSION:-4.1.1}"
 VALIDATE_YAML="${VALIDATE_YAML:-"true"}"
 CLICKHOUSE_NAMESPACE="${CLICKHOUSE_NAMESPACE:-ch-test}"
 
 echo "Setup OpenEBS"
 echo "OPTIONS"
 echo "\$OPENEBS_NAMESPACE=${OPENEBS_NAMESPACE}"
-echo "\$OPENEBS_OPERATOR_VERSION=${OPENEBS_OPERATOR_VERSION}"
+echo "\$OPENEBS_HELM_VERSION=${OPENEBS_HELM_VERSION}"
 echo "\$VALIDATE_YAML=${VALIDATE_YAML}"
 echo "\$CLICKHOUSE_NAMESPACE=${CLICKHOUSE_NAMESPACE}"
 echo ""
@@ -73,7 +73,7 @@ trap 'clean_dir ${TMP_DIR}' SIGHUP SIGINT SIGQUIT SIGFPE SIGALRM SIGTERM
 helm repo add openebs https://openebs.github.io/openebs
 helm repo update
 
-echo "Setup OpenEBS operator ${OPENEBS_OPERATOR_VERSION} into ${OPENEBS_NAMESPACE} namespace"
+echo "Setup OpenEBS operator ${OPENEBS_HELM_VERSION} into ${OPENEBS_NAMESPACE} namespace"
 # Let's setup all OpenEBS-related stuff into dedicated namespace
 ## TODO: need to refactor after next OPENEBS-operator release
@@ -82,7 +82,7 @@ kubectl delete crd volumesnapshotcontents.snapshot.storage.k8s.io
 kubectl delete crd volumesnapshots.snapshot.storage.k8s.io
 
 # Setup OPENEBS-operator into dedicated namespace via kustomize
-helm install openebs --namespace ${OPENEBS_NAMESPACE} openebs/openebs --set engines.replicated.mayastor.enabled=false --set engines.local.zfs.enabled=false --create-namespace --version 4.1.1
+helm install openebs --namespace "${OPENEBS_NAMESPACE}" openebs/openebs --set engines.replicated.mayastor.enabled=false --set engines.local.zfs.enabled=false --create-namespace --version "${OPENEBS_HELM_VERSION}"
 
 echo -n "Waiting '${OPENEBS_NAMESPACE}/openebs-lvm-localpv-controller' deployment to start"
 # Check openebs-lvm-localpv-controller deployment has all pods ready
@@ -93,7 +93,7 @@ done
 echo "...DONE"
 
 # Install the test storage class
-kubectl apply -f openebs-lvm-storageclass.yaml -n ${OPENEBS_NAMESPACE}
+kubectl apply -f openebs-lvm-storageclass.yaml -n "${OPENEBS_NAMESPACE}"
 
 # Install a simple Clickhouse instance using openebs
 echo "Setup simple Clickhouse into ${OPENEBS_NAMESPACE} namespace using OpenEBS"
diff --git a/deploy/operator/clickhouse-operator-install-ansible.yaml b/deploy/operator/clickhouse-operator-install-ansible.yaml
index 34db98648..408d3eee6 100644
--- a/deploy/operator/clickhouse-operator-install-ansible.yaml
+++ b/deploy/operator/clickhouse-operator-install-ansible.yaml
@@ -4714,7 +4714,7 @@ data:
           "containers" : [
             {
               "name": "clickhouse",
-              "image": "clickhouse/clickhouse-server:23.8",
+              "image": "clickhouse/clickhouse-server:24.8",
               "ports": [
                 {
                   "name": "http",
diff --git a/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml b/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml
index 685c20712..a6c5c33a9 100644
--- a/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml
+++ b/deploy/operator/clickhouse-operator-install-bundle-v1beta1.yaml
@@ -4657,7 +4657,7 @@ data:
           "containers" : [
             {
               "name": "clickhouse",
-              "image": "clickhouse/clickhouse-server:23.8",
+              "image": "clickhouse/clickhouse-server:24.8",
               "ports": [
                 {
                   "name": "http",
diff --git a/deploy/operator/clickhouse-operator-install-bundle.yaml b/deploy/operator/clickhouse-operator-install-bundle.yaml
index 5180e0606..053a176ca 100644
--- a/deploy/operator/clickhouse-operator-install-bundle.yaml
+++ b/deploy/operator/clickhouse-operator-install-bundle.yaml
@@ -4707,7 +4707,7 @@ data:
           "containers" : [
             {
               "name": "clickhouse",
-              "image": "clickhouse/clickhouse-server:23.8",
+              "image": "clickhouse/clickhouse-server:24.8",
               "ports": [
                 {
                   "name": "http",
diff --git a/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml b/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml
index 3ba53a09b..00beb0441 100644
--- a/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml
+++ b/deploy/operator/clickhouse-operator-install-template-v1beta1.yaml
@@ -4657,7 +4657,7 @@ data:
           "containers" : [
             {
               "name": "clickhouse",
-              "image": "clickhouse/clickhouse-server:23.8",
+              "image": "clickhouse/clickhouse-server:24.8",
               "ports": [
                 {
                   "name": "http",
diff --git a/deploy/operator/clickhouse-operator-install-template.yaml b/deploy/operator/clickhouse-operator-install-template.yaml
index 16c0727dc..db38721ff 100644
--- a/deploy/operator/clickhouse-operator-install-template.yaml
+++ b/deploy/operator/clickhouse-operator-install-template.yaml
@@ -4707,7 +4707,7 @@ data:
           "containers" : [
             {
               "name": "clickhouse",
-              "image": "clickhouse/clickhouse-server:23.8",
+              "image": "clickhouse/clickhouse-server:24.8",
               "ports": [
                 {
                   "name": "http",
diff --git a/deploy/operator/clickhouse-operator-install-tf.yaml b/deploy/operator/clickhouse-operator-install-tf.yaml
index 4795ecee3..dbfc44dde 100644
--- a/deploy/operator/clickhouse-operator-install-tf.yaml
+++ b/deploy/operator/clickhouse-operator-install-tf.yaml
@@ -4714,7 +4714,7 @@ data:
           "containers" : [
             {
               "name": "clickhouse",
-              "image": "clickhouse/clickhouse-server:23.8",
+              "image": "clickhouse/clickhouse-server:24.8",
               "ports": [
                 {
                   "name": "http",
diff --git a/deploy/prometheus/create-prometheus.sh b/deploy/prometheus/create-prometheus.sh
index 274db7999..8b5a428e0 100755
--- a/deploy/prometheus/create-prometheus.sh
+++ b/deploy/prometheus/create-prometheus.sh
@@ -1,12 +1,10 @@
 #!/bin/bash
 
 echo "External value for \$PROMETHEUS_NAMESPACE=$PROMETHEUS_NAMESPACE"
-echo "External value for \$OPERATOR_NAMESPACE=$OPERATOR_NAMESPACE"
 echo "External value for \$VALIDATE_YAML=$VALIDATE_YAML"
 
 export PROMETHEUS_NAMESPACE="${PROMETHEUS_NAMESPACE:-prometheus}"
-export OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE:-kube-system}"
-export PROMETHEUS_OPERATOR_BRANCH="${PROMETHEUS_OPERATOR_BRANCH:-v0.68.0}"
+export PROMETHEUS_OPERATOR_BRANCH="${PROMETHEUS_OPERATOR_BRANCH:-v0.78.2}"
 export ALERT_MANAGER_EXTERNAL_URL="${ALERT_MANAGER_EXTERNAL_URL:-http://localhost:9093}"
 # Possible values for "validate yaml" are values from --validate=XXX kubectl option. They are true/false ATM
 export VALIDATE_YAML="${VALIDATE_YAML:-true}"
@@ -15,7 +13,6 @@ CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 
 echo "OPTIONS"
 echo "Setup Prometheus into \$PROMETHEUS_NAMESPACE=${PROMETHEUS_NAMESPACE} namespace"
-echo "Expecting operator in \$OPERATOR_NAMESPACE=${OPERATOR_NAMESPACE} namespace"
 echo "Validate .yaml file \$VALIDATE_YAML=${VALIDATE_YAML}"
 echo ""
 echo "!!! IMPORTANT !!!"
diff --git a/deploy/prometheus/prometheus-sensitive-data.example.sh b/deploy/prometheus/prometheus-sensitive-data.example.sh
index 53a1395be..4e7db004e 100755
--- a/deploy/prometheus/prometheus-sensitive-data.example.sh
+++ b/deploy/prometheus/prometheus-sensitive-data.example.sh
@@ -1,4 +1,4 @@
 #!/usr/bin/env bash
 # look at https://api.slack.com/incoming-webhooks how to enable external webhooks in Slack API
-export SLACK_WEBHOOK_URL=https://hooks.slack.com/services/XXXX/YYYYY/ZZZZZ
+export SLACK_WEBHOOK_URL=https://127.0.0.1/services/XXXX/YYYYY/ZZZZZ
 export SLACK_CHANNEL="#alerts-channel-name"
diff --git a/deploy/zookeeper/zookeeper-manually/quick-start-persistent-volume/zookeeper-1-node-1GB-for-tests-only.yaml b/deploy/zookeeper/zookeeper-manually/quick-start-persistent-volume/zookeeper-1-node-1GB-for-tests-only.yaml
index 0aa1e7918..0be8b9b85 100644
--- a/deploy/zookeeper/zookeeper-manually/quick-start-persistent-volume/zookeeper-1-node-1GB-for-tests-only.yaml
+++ b/deploy/zookeeper/zookeeper-manually/quick-start-persistent-volume/zookeeper-1-node-1GB-for-tests-only.yaml
@@ -82,7 +82,7 @@ spec:
       containers:
         - name: kubernetes-zookeeper
           imagePullPolicy: IfNotPresent
-          image: "docker.io/zookeeper:3.8.4"
+          image: "docker.io/zookeeper:latest"
           ports:
             - containerPort: 2181
               name: client
diff --git a/deploy/zookeeper/zookeeper-manually/quick-start-persistent-volume/zookeeper-3-nodes-1GB-for-tests-only.yaml b/deploy/zookeeper/zookeeper-manually/quick-start-persistent-volume/zookeeper-3-nodes-1GB-for-tests-only.yaml
index 68bf22703..9e5de0c8b 100644
--- a/deploy/zookeeper/zookeeper-manually/quick-start-persistent-volume/zookeeper-3-nodes-1GB-for-tests-only.yaml
+++ b/deploy/zookeeper/zookeeper-manually/quick-start-persistent-volume/zookeeper-3-nodes-1GB-for-tests-only.yaml
@@ -82,7 +82,7 @@ spec:
       containers:
         - name: kubernetes-zookeeper
           imagePullPolicy: IfNotPresent
-          image: "docker.io/zookeeper:3.8.4"
+          image: "docker.io/zookeeper:latest"
           ports:
             - containerPort: 2181
               name: client
diff --git a/docs/chi-examples/02-templates-01-pod-template.yaml b/docs/chi-examples/02-templates-01-pod-template.yaml
index 715044a7b..edc09fb61 100644
--- a/docs/chi-examples/02-templates-01-pod-template.yaml
+++ b/docs/chi-examples/02-templates-01-pod-template.yaml
@@ -29,7 +29,7 @@ spec:
     spec:
       containers:
         - name: clickhouse
-          image: clickhouse/clickhouse-server:23.8
+          image: clickhouse/clickhouse-server:24.8
           # Container has explicitly specified resource requests
           resources:
             requests:
diff --git a/docs/chi-examples/02-templates-03-host-template-volume-claim-and-pod-resources-limit.yaml b/docs/chi-examples/02-templates-03-host-template-volume-claim-and-pod-resources-limit.yaml
index 5b3f121ef..9208e635c 100644
--- a/docs/chi-examples/02-templates-03-host-template-volume-claim-and-pod-resources-limit.yaml
+++ b/docs/chi-examples/02-templates-03-host-template-volume-claim-and-pod-resources-limit.yaml
@@ -29,7 +29,7 @@ spec:
     spec:
       containers:
        - name: clickhouse
-          image: clickhouse/clickhouse-server:23.8
+          image: clickhouse/clickhouse-server:24.8
          volumeMounts:
            - name: clickhouse-data-storage
              mountPath: /var/lib/clickhouse
diff --git a/docs/chi-examples/02-templates-04-sidecar.yaml b/docs/chi-examples/02-templates-04-sidecar.yaml
index 604b23366..d7f9cd543 100644
--- a/docs/chi-examples/02-templates-04-sidecar.yaml
+++ b/docs/chi-examples/02-templates-04-sidecar.yaml
@@ -39,7 +39,7 @@ spec:
       containers:
         # Main container
         - name: clickhouse
-          image: clickhouse/clickhouse-server:23.8
+          image: clickhouse/clickhouse-server:24.8
          volumeMounts:
            - name: sidecar-configmap-volume
              mountPath: /configmap-volume
diff --git a/docs/chi-examples/02-templates-06-syncUser.yaml b/docs/chi-examples/02-templates-06-syncUser.yaml
index b60e4be93..3eab598d7 100644
--- a/docs/chi-examples/02-templates-06-syncUser.yaml
+++ b/docs/chi-examples/02-templates-06-syncUser.yaml
@@ -37,7 +37,7 @@ spec:
             ckop: my
       containers:
         - name: clickhouse
-          image: clickhouse-server:22.10.3.27
+          image: clickhouse/clickhouse-server:latest
           imagePullPolicy: IfNotPresent
           volumeMounts:
             - mountPath: /var/lib/clickhouse
diff --git a/docs/chi-examples/03-persistent-volume-02-pod-template.yaml b/docs/chi-examples/03-persistent-volume-02-pod-template.yaml
index bdc563467..d03103152 100644
--- a/docs/chi-examples/03-persistent-volume-02-pod-template.yaml
+++ b/docs/chi-examples/03-persistent-volume-02-pod-template.yaml
@@ -19,7 +19,7 @@ spec:
     spec:
       containers:
         - name: clickhouse
-          image: clickhouse/clickhouse-server:23.8
+          image: clickhouse/clickhouse-server:24.8
          volumeMounts:
            - name: data-storage-vc-template
              mountPath: /var/lib/clickhouse
diff --git a/docs/chi-examples/03-persistent-volume-03-custom-labels-and-annotations.yaml b/docs/chi-examples/03-persistent-volume-03-custom-labels-and-annotations.yaml
index 41da5a2bb..337025829 100644
--- a/docs/chi-examples/03-persistent-volume-03-custom-labels-and-annotations.yaml
+++ b/docs/chi-examples/03-persistent-volume-03-custom-labels-and-annotations.yaml
@@ -19,7 +19,7 @@ spec:
     spec:
      containers:
        - name: clickhouse
-          image: clickhouse/clickhouse-server:23.8
+          image: clickhouse/clickhouse-server:24.8
          volumeMounts:
            - name: data-storage-vc-template
              mountPath: /var/lib/clickhouse
diff --git a/docs/chi-examples/03-persistent-volume-07-multiple-resizable-volumes-1.yaml b/docs/chi-examples/03-persistent-volume-07-multiple-resizable-volumes-1.yaml
index 77c34fff8..9754d25bf 100644
--- a/docs/chi-examples/03-persistent-volume-07-multiple-resizable-volumes-1.yaml
+++ b/docs/chi-examples/03-persistent-volume-07-multiple-resizable-volumes-1.yaml
@@ -32,7 +32,7 @@ spec:
     spec:
      containers:
        - name: clickhouse
-          image: clickhouse/clickhouse-server:23.8
+          image: clickhouse/clickhouse-server:24.8
          volumeMounts:
            - name: data-storage-vc-template-1
              mountPath: /data/clickhouse-01
diff --git a/docs/chi-examples/03-persistent-volume-07-multiple-resizable-volumes-2.yaml b/docs/chi-examples/03-persistent-volume-07-multiple-resizable-volumes-2.yaml
index 51977d7ee..7159a394c 100644
--- a/docs/chi-examples/03-persistent-volume-07-multiple-resizable-volumes-2.yaml
+++ b/docs/chi-examples/03-persistent-volume-07-multiple-resizable-volumes-2.yaml
@@ -32,7 +32,7 @@ spec:
     spec:
      containers:
        - name: clickhouse
-          image: clickhouse/clickhouse-server:23.8
+          image: clickhouse/clickhouse-server:24.8
          volumeMounts:
            - name: data-storage-vc-template-1
              mountPath: /data/clickhouse-01
diff --git a/docs/chi-examples/03-persistent-volume-07-multiple-resizable-volumes-3.yaml b/docs/chi-examples/03-persistent-volume-07-multiple-resizable-volumes-3.yaml
index ff5e307b6..7fefdd80c 100644
--- a/docs/chi-examples/03-persistent-volume-07-multiple-resizable-volumes-3.yaml
+++ b/docs/chi-examples/03-persistent-volume-07-multiple-resizable-volumes-3.yaml
@@ -32,7 +32,7 @@ spec:
     spec:
      containers:
        - name: clickhouse
-          image: clickhouse/clickhouse-server:23.8
+          image: clickhouse/clickhouse-server:24.8
          volumeMounts:
            - name: data-storage-vc-template-1
              mountPath: /data/clickhouse-01
diff --git a/docs/chi-examples/03-persistent-volume-07-security-context.yaml b/docs/chi-examples/03-persistent-volume-07-security-context.yaml
index 6c104f5a6..0464acb6f 100644
--- a/docs/chi-examples/03-persistent-volume-07-security-context.yaml
+++ b/docs/chi-examples/03-persistent-volume-07-security-context.yaml
@@ -46,7 +46,7 @@ spec:
             add: [ "CAP_NICE", "CAP_IPC_LOCK" ]
       containers:
         - name: clickhouse-pod
-          image: clickhouse/clickhouse-server:23.8
+          image: clickhouse/clickhouse-server:24.8
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - name: data-storage-vc-template-1
@@ -55,7 +55,7 @@ spec:
             - clickhouse-server
             - --config-file=/etc/clickhouse-server/config.xml
         - name: clickhouse-backup
-          image: altinity/clickhouse-backup:2.2.7
+          image: altinity/clickhouse-backup:latest
          imagePullPolicy: Always
          command:
            - /bin/bash
diff --git a/docs/chi-examples/04-replication-zookeeper-03-minimal-AWS-persistent-volume.yaml b/docs/chi-examples/04-replication-zookeeper-03-minimal-AWS-persistent-volume.yaml
index 06fe5c07e..c88527dd5 100644
--- a/docs/chi-examples/04-replication-zookeeper-03-minimal-AWS-persistent-volume.yaml
+++ b/docs/chi-examples/04-replication-zookeeper-03-minimal-AWS-persistent-volume.yaml
@@ -21,7 +21,7 @@ spec:
     spec:
       containers:
         - name: clickhouse-pod
-          image: clickhouse/clickhouse-server:23.8
+          image: clickhouse/clickhouse-server:24.8
          volumeMounts:
            - name: clickhouse-storage-template
              mountPath: /var/lib/clickhouse
diff --git a/docs/chi-examples/04-replication-zookeeper-04-medium-AWS-persistent-volume.yaml b/docs/chi-examples/04-replication-zookeeper-04-medium-AWS-persistent-volume.yaml
index c21b85041..4d6296b19 100644
--- a/docs/chi-examples/04-replication-zookeeper-04-medium-AWS-persistent-volume.yaml
+++ b/docs/chi-examples/04-replication-zookeeper-04-medium-AWS-persistent-volume.yaml
@@ -25,7 +25,7 @@ spec:
     spec:
       containers:
         - name: clickhouse-pod
-          image: clickhouse/clickhouse-server:23.8
+          image: clickhouse/clickhouse-server:24.8
          volumeMounts:
            - name: clickhouse-storage-template
              mountPath: /var/lib/clickhouse
diff --git a/docs/chi-examples/04-replication-zookeeper-05-simple-PV.yaml b/docs/chi-examples/04-replication-zookeeper-05-simple-PV.yaml
index 534184645..224e81f6c 100644
--- a/docs/chi-examples/04-replication-zookeeper-05-simple-PV.yaml
+++ b/docs/chi-examples/04-replication-zookeeper-05-simple-PV.yaml
@@ -34,4 +34,4 @@ spec:
     spec:
       containers:
         - name: clickhouse-pod
-          image: clickhouse/clickhouse-server:23.8
+          image: clickhouse/clickhouse-server:24.8
diff --git a/docs/chi-examples/06-advanced-layout-01-one-shard-many-replicas.yaml b/docs/chi-examples/06-advanced-layout-01-one-shard-many-replicas.yaml
index 516133c40..c5c582a22 100644
--- a/docs/chi-examples/06-advanced-layout-01-one-shard-many-replicas.yaml
+++ b/docs/chi-examples/06-advanced-layout-01-one-shard-many-replicas.yaml
@@ -21,10 +21,10 @@ spec:
     spec:
       containers:
         - name: clickhouse-pod
-          image: clickhouse/clickhouse-server:23.8
+          image: clickhouse/clickhouse-server:24.8
 
       - name: clickhouse:nonexist
         spec:
           containers:
             - name: clickhouse-pod
-              image: clickhouse/clickhouse-server:23.822
+              image: clickhouse/clickhouse-server:24.822
diff --git a/docs/chi-examples/06-advanced-layout-02-shards.yaml b/docs/chi-examples/06-advanced-layout-02-shards.yaml
index 82be10895..02e81ebb2 100644
--- a/docs/chi-examples/06-advanced-layout-02-shards.yaml
+++ b/docs/chi-examples/06-advanced-layout-02-shards.yaml
@@ -10,17 +10,17 @@ spec:
         shards:
           - replicas:
               - templates:
-                  podTemplate: clickhouse:22.8
+                  podTemplate: clickhouse:23.8
                 httpPort: 8000
                 tcpPort: 8001
                 interserverHTTPPort: 8002
              - templates:
-                  podTemplate: clickhouse:23.3
+                  podTemplate: clickhouse:24.3
                httpPort: 9000
                tcpPort: 9001
                interserverHTTPPort: 9002
              - templates:
-                  podTemplate: clickhouse:23.8
+                  podTemplate: clickhouse:24.8
                httpPort: 10000
                tcpPort: 10001
                interserverHTTPPort: 10002
@@ -28,21 +28,21 @@ spec:
 
   templates:
     podTemplates:
-      - name: clickhouse:22.8
+      - name: clickhouse:23.8
         spec:
           containers:
             - name: clickhouse-pod
-              image: clickhouse/clickhouse-server:22.8
+              image: clickhouse/clickhouse-server:23.8
 
-      - name: clickhouse:23.3
+      - name: clickhouse:24.3
         spec:
           containers:
             - name: clickhouse-pod
-              image: clickhouse/clickhouse-server:23.3
+              image: clickhouse/clickhouse-server:24.3
 
-      - name: clickhouse:23.8
+      - name: clickhouse:24.8
         spec:
           containers:
             - name: clickhouse-pod
-              image: clickhouse/clickhouse-server:23.8
+              image: clickhouse/clickhouse-server:24.8
diff --git a/docs/chi-examples/06-advanced-layout-03-replicas.yaml b/docs/chi-examples/06-advanced-layout-03-replicas.yaml
index 5af781e0b..ad7b3dd3c 100644
--- a/docs/chi-examples/06-advanced-layout-03-replicas.yaml
+++ b/docs/chi-examples/06-advanced-layout-03-replicas.yaml
@@ -10,17 +10,17 @@ spec:
         shardsCount: 4
         replicas:
           - templates:
-              podTemplate: clickhouse:22.8
+              podTemplate: clickhouse:23.8
             httpPort: 8000
            tcpPort: 8001
            interserverHTTPPort: 8002
          - templates:
-              podTemplate: clickhouse:23.3
+              podTemplate: clickhouse:24.3
            httpPort: 9000
            tcpPort: 9001
            interserverHTTPPort: 9002
          - templates:
-              podTemplate: clickhouse:23.8
+              podTemplate: clickhouse:24.8
            httpPort: 10000
            tcpPort: 10001
            interserverHTTPPort: 10002
@@ -28,21 +28,21 @@ spec:
 
   templates:
     podTemplates:
-      - name: clickhouse:22.8
+      - name: clickhouse:23.8
         spec:
           containers:
             - name: clickhouse-pod
-              image: clickhouse/clickhouse-server:22.8
+              image: clickhouse/clickhouse-server:23.8
 
-      - name: clickhouse:23.3
+      - name: clickhouse:24.3
         spec:
           containers:
             - name: clickhouse-pod
-              image: clickhouse/clickhouse-server:23.3
+              image: clickhouse/clickhouse-server:24.3
 
-      - name: clickhouse:23.8
+      - name: clickhouse:24.8
         spec:
           containers:
             - name: clickhouse-pod
-              image: clickhouse/clickhouse-server:23.8
+              image: clickhouse/clickhouse-server:24.8
diff --git a/docs/chi-examples/06-advanced-layout-04-multiple-clusters.yaml b/docs/chi-examples/06-advanced-layout-04-multiple-clusters.yaml
index b0dcf3450..8ed705d1a 100644
--- a/docs/chi-examples/06-advanced-layout-04-multiple-clusters.yaml
+++ b/docs/chi-examples/06-advanced-layout-04-multiple-clusters.yaml
@@ -48,16 +48,16 @@ spec:
         spec:
           containers:
             - name: clickhouse-pod
-              image: clickhouse/clickhouse-server:22.8
+              image: clickhouse/clickhouse-server:23.8
 
       - name: t2
         spec:
           containers:
             - name: clickhouse-pod
-              image: clickhouse/clickhouse-server:23.3
+              image: clickhouse/clickhouse-server:24.3
 
       - name: t3
         spec:
           containers:
             - name: clickhouse-pod
-              image: clickhouse/clickhouse-server:23.8
+              image: clickhouse/clickhouse-server:24.8
diff --git a/docs/chi-examples/08-clickhouse-version-update-01-initial-position.yaml b/docs/chi-examples/08-clickhouse-version-update-01-initial-position.yaml
index 528e23074..720ceb6b8 100644
--- a/docs/chi-examples/08-clickhouse-version-update-01-initial-position.yaml
+++ b/docs/chi-examples/08-clickhouse-version-update-01-initial-position.yaml
@@ -7,7 +7,7 @@ spec:
   clusters:
     - name: update
       templates:
-        podTemplate: clickhouse:23.3
+        podTemplate: clickhouse:24.8
       layout:
         shards:
           - replicas:
@@ -17,8 +17,8 @@
 
   templates:
     podTemplates:
-      - name: clickhouse:23.3
+      - name: clickhouse:24.8
         spec:
           containers:
             - name: clickhouse-pod
-              image: clickhouse/clickhouse-server:23.3
+              image: clickhouse/clickhouse-server:24.8
diff --git a/docs/chi-examples/08-clickhouse-version-update-02-apply-update-one.yaml b/docs/chi-examples/08-clickhouse-version-update-02-apply-update-one.yaml
index 68ae27418..1a93bfe65 100644
--- a/docs/chi-examples/08-clickhouse-version-update-02-apply-update-one.yaml
+++ b/docs/chi-examples/08-clickhouse-version-update-02-apply-update-one.yaml
@@ -7,7 +7,7 @@ spec:
   clusters:
     - name: update
       templates:
-        podTemplate: clickhouse:23.3
+        podTemplate: clickhouse:24.3
       layout:
         shards:
           - replicas:
@@ -15,18 +15,18 @@ spec:
             - tcpPort: 9000
             - tcpPort: 9000
               templates:
-                podTemplate: clickhouse:23.8
+                podTemplate: clickhouse:24.8
 
   templates:
     podTemplates:
-      - name: clickhouse:23.3
+      - name: clickhouse:24.3
         spec:
           containers:
             - name: clickhouse-pod
-              image: clickhouse/clickhouse-server:23.3
+              image: clickhouse/clickhouse-server:24.3
 
-      - name: clickhouse:23.8
+      - name: clickhouse:24.8
         spec:
           containers:
             - name: clickhouse-pod
-              image: clickhouse/clickhouse-server:23.8
+              image: clickhouse/clickhouse-server:24.8
diff --git a/docs/chi-examples/08-clickhouse-version-update-03-apply-update-all.yaml b/docs/chi-examples/08-clickhouse-version-update-03-apply-update-all.yaml
index b62644237..8866f937f 100644
--- a/docs/chi-examples/08-clickhouse-version-update-03-apply-update-all.yaml
+++ b/docs/chi-examples/08-clickhouse-version-update-03-apply-update-all.yaml
@@ -17,8 +17,8 @@ spec:
 
   templates:
     podTemplates:
-      - name: clickhouse:23.8
+      - name: clickhouse:24.8
         spec:
           containers:
             - name: clickhouse-pod
-              image: clickhouse/clickhouse-server:23.8
+              image: clickhouse/clickhouse-server:24.8
diff --git a/docs/chi-examples/09-rolling-update-emptydir-01-initial-position.yaml b/docs/chi-examples/09-rolling-update-emptydir-01-initial-position.yaml
index e73929760..2b4687f6e 100644
--- a/docs/chi-examples/09-rolling-update-emptydir-01-initial-position.yaml
+++ b/docs/chi-examples/09-rolling-update-emptydir-01-initial-position.yaml
@@ -16,7 +16,7 @@ spec:
     spec:
       containers:
         - name: clickhouse-pod
-          image: clickhouse/clickhouse-server:23.8
+          image: clickhouse/clickhouse-server:24.8
          volumeMounts:
            - name: clickhouse-storage
              mountPath: /var/lib/clickhouse
diff --git a/docs/chi-examples/09-rolling-update-emptydir-02-apply-update.yaml b/docs/chi-examples/09-rolling-update-emptydir-02-apply-update.yaml
index 7d92f614b..dce19ec4c 100644
--- a/docs/chi-examples/09-rolling-update-emptydir-02-apply-update.yaml
+++ b/docs/chi-examples/09-rolling-update-emptydir-02-apply-update.yaml
@@ -16,7 +16,7 @@ spec:
     spec:
       containers:
         - name: clickhouse-pod
-          image: clickhouse/clickhouse-server:23.8
+          image: clickhouse/clickhouse-server:24.8
          volumeMounts:
            - name: clickhouse-storage
              mountPath: /var/lib/clickhouse
diff --git a/docs/chi-examples/10-zones-01-simple-01-aws-pods-in-availability-zones.yaml b/docs/chi-examples/10-zones-01-simple-01-aws-pods-in-availability-zones.yaml
index a21bf7a79..ca887fb4f 100644
--- a/docs/chi-examples/10-zones-01-simple-01-aws-pods-in-availability-zones.yaml
+++ b/docs/chi-examples/10-zones-01-simple-01-aws-pods-in-availability-zones.yaml
@@ -49,7 +49,7 @@ spec:
         spec:
           containers:
             - name: clickhouse-pod
-              image: clickhouse/clickhouse-server:23.8
+              image: clickhouse/clickhouse-server:24.8
 
       - name: clickhouse-in-zone-us-east-1b
         zone:
@@ -58,4 +58,4 @@ spec:
         spec:
           containers:
             - name: clickhouse-pod
-              image: clickhouse/clickhouse-server:23.8
+              image: clickhouse/clickhouse-server:24.8
diff --git a/docs/chi-examples/10-zones-01-simple-02-aws-pod-per-host.yaml b/docs/chi-examples/10-zones-01-simple-02-aws-pod-per-host.yaml
index 437bc7fee..b61bd9f55 100644
--- a/docs/chi-examples/10-zones-01-simple-02-aws-pod-per-host.yaml
+++ b/docs/chi-examples/10-zones-01-simple-02-aws-pod-per-host.yaml
@@ -40,4 +40,4 @@ spec:
         spec:
           containers:
             - name: clickhouse-pod
-              image: clickhouse/clickhouse-server:23.8
+              image: clickhouse/clickhouse-server:24.8
diff --git a/docs/chi-examples/10-zones-02-advanced-01-aws-pods-in-availability-zones.yaml b/docs/chi-examples/10-zones-02-advanced-01-aws-pods-in-availability-zones.yaml
index 12a77cac1..5e1b0c1b9 100644
--- a/docs/chi-examples/10-zones-02-advanced-01-aws-pods-in-availability-zones.yaml
+++ b/docs/chi-examples/10-zones-02-advanced-01-aws-pods-in-availability-zones.yaml
@@ -56,7 +56,7 @@ spec:
                     - "us-east-1a"
           containers:
             - name: clickhouse-pod
-              image: clickhouse/clickhouse-server:23.8
+              image: clickhouse/clickhouse-server:24.8
 
       - name: clickhouse-in-zone-us-east-1b
         spec:
@@ -72,4 +72,4 @@ spec:
                     - "us-east-1b"
           containers:
             - name: clickhouse-pod
-              image: clickhouse/clickhouse-server:23.8
+              image: clickhouse/clickhouse-server:24.8
diff --git a/docs/chi-examples/10-zones-02-advanced-02-aws-pod-per-host.yaml b/docs/chi-examples/10-zones-02-advanced-02-aws-pod-per-host.yaml
index e65e7d5a7..ac9f0c9b2 100644
--- a/docs/chi-examples/10-zones-02-advanced-02-aws-pod-per-host.yaml
+++ b/docs/chi-examples/10-zones-02-advanced-02-aws-pod-per-host.yaml
@@ -55,4 +55,4 @@ spec:
               topologyKey: "kubernetes.io/hostname"
           containers:
             - name: clickhouse-pod
-              image: clickhouse/clickhouse-server:23.8
+              image: clickhouse/clickhouse-server:24.8
diff --git a/docs/chi-examples/10-zones-03-advanced-03-pod-per-host-default-storage-class.yaml b/docs/chi-examples/10-zones-03-advanced-03-pod-per-host-default-storage-class.yaml
index 3704698b4..9ed15dfe7 100644
--- a/docs/chi-examples/10-zones-03-advanced-03-pod-per-host-default-storage-class.yaml
+++ b/docs/chi-examples/10-zones-03-advanced-03-pod-per-host-default-storage-class.yaml
@@ -76,7 +76,7 @@ spec:
               topologyKey: "kubernetes.io/hostname"
           containers:
             - name: clickhouse-pod
-              image: clickhouse/clickhouse-server:23.8
+              image: clickhouse/clickhouse-server:24.8
              volumeMounts:
                - name: storage-vc-template
                  mountPath: /var/lib/clickhouse
diff --git a/docs/chi-examples/10-zones-04-advanced-04-pod-per-host-local-storage.yaml b/docs/chi-examples/10-zones-04-advanced-04-pod-per-host-local-storage.yaml
index 5a0379d02..e24c0f116 100644
--- a/docs/chi-examples/10-zones-04-advanced-04-pod-per-host-local-storage.yaml
+++ b/docs/chi-examples/10-zones-04-advanced-04-pod-per-host-local-storage.yaml
@@ -57,7 +57,7 @@ spec:
               type: DirectoryOrCreate
           containers:
             - name: clickhouse-pod
-              image: clickhouse/clickhouse-server:23.8
+              image: clickhouse/clickhouse-server:24.8
              volumeMounts:
                - name: local-path
                  mountPath: /var/lib/clickhouse
diff --git a/docs/chi-examples/11-local-storage-01-simple-host-path.yaml b/docs/chi-examples/11-local-storage-01-simple-host-path.yaml
index 2e77b1875..099913515 100644
--- a/docs/chi-examples/11-local-storage-01-simple-host-path.yaml
+++ b/docs/chi-examples/11-local-storage-01-simple-host-path.yaml
@@ -48,7 +48,7 @@ spec:
             type: DirectoryOrCreate
       containers:
         - name: clickhouse-pod
-          image: clickhouse/clickhouse-server:23.8
+          image: clickhouse/clickhouse-server:24.8
          volumeMounts:
            # Specify reference to volume on local filesystem
            - name: local-path
diff --git a/docs/chi-examples/11-local-storage-02-advanced-host-path.yaml b/docs/chi-examples/11-local-storage-02-advanced-host-path.yaml
index 737f577ac..be9c004d4 100644
--- a/docs/chi-examples/11-local-storage-02-advanced-host-path.yaml
+++ b/docs/chi-examples/11-local-storage-02-advanced-host-path.yaml
@@ -62,7 +62,7 @@ spec:
             type: DirectoryOrCreate
       containers:
         - name: clickhouse-pod
-          image: clickhouse/clickhouse-server:23.8
+          image: clickhouse/clickhouse-server:24.8
          volumeMounts:
            # Specify reference to volume on local filesystem
            - name: local-path
diff --git a/docs/chi-examples/12-troubleshooting-01.yaml b/docs/chi-examples/12-troubleshooting-01.yaml
index 68cffbec6..478d104d3 100644
--- a/docs/chi-examples/12-troubleshooting-01.yaml
+++ b/docs/chi-examples/12-troubleshooting-01.yaml
@@ -18,7 +18,7 @@ spec:
     spec:
       containers:
         - name: clickhouse
-          image: clickhouse/clickhouse-server:23.8
+          image: clickhouse/clickhouse-server:24.8
          command:
            - "/bin/bash"
            - "-c"
diff --git a/docs/chi-examples/13-distribution-02-3x3-circular-replication.yaml b/docs/chi-examples/13-distribution-02-3x3-circular-replication.yaml
index ffd58dbde..e9277a935 100644
--- a/docs/chi-examples/13-distribution-02-3x3-circular-replication.yaml
+++ b/docs/chi-examples/13-distribution-02-3x3-circular-replication.yaml
@@ -20,7 +20,7 @@ spec:
     spec:
       containers:
         - name: clickhouse
-          image: clickhouse/clickhouse-server:23.8
+          image: clickhouse/clickhouse-server:24.8
 ---
 apiVersion: "clickhouse.altinity.com/v1"
 kind: "ClickHouseInstallation"
@@ -44,4 +44,4 @@ spec:
     spec:
       containers:
         - name: clickhouse
-          image: clickhouse/clickhouse-server:23.8
+          image: clickhouse/clickhouse-server:24.8
diff --git a/docs/chi-examples/13-distribution-03-3x3-distribution-detailed.yaml b/docs/chi-examples/13-distribution-03-3x3-distribution-detailed.yaml
index 27b97ddbc..cc688231c 100644
--- a/docs/chi-examples/13-distribution-03-3x3-distribution-detailed.yaml
+++ b/docs/chi-examples/13-distribution-03-3x3-distribution-detailed.yaml
@@ -27,4 +27,4 @@ spec:
     spec:
       containers:
         - name: clickhouse
-          image: clickhouse/clickhouse-server:23.8
+          image: clickhouse/clickhouse-server:24.8
diff --git a/docs/chi-examples/15-hostNetwork-01-simple.yaml b/docs/chi-examples/15-hostNetwork-01-simple.yaml
index 79191166d..7014b28c3 100644
--- a/docs/chi-examples/15-hostNetwork-01-simple.yaml
+++ b/docs/chi-examples/15-hostNetwork-01-simple.yaml
@@ -24,4 +24,4 @@ spec:
       dnsPolicy: ClusterFirstWithHostNet
       containers:
         - name: clickhouse
-          image: clickhouse/clickhouse-server:23.8
+          image: clickhouse/clickhouse-server:24.8
diff --git a/docs/chi-examples/15-hostNetwork-02-simple-port-distribution.yaml b/docs/chi-examples/15-hostNetwork-02-simple-port-distribution.yaml
index 3391b93f4..6c31106be 100644
--- a/docs/chi-examples/15-hostNetwork-02-simple-port-distribution.yaml
+++ b/docs/chi-examples/15-hostNetwork-02-simple-port-distribution.yaml
@@ -34,4 +34,4 @@ spec:
       dnsPolicy: ClusterFirstWithHostNet
       containers:
         - name: clickhouse
-          image: clickhouse/clickhouse-server:23.8
+          image: clickhouse/clickhouse-server:24.8
diff --git a/docs/chi-examples/15-hostNetwork-03-expanded-port-distribution.yaml b/docs/chi-examples/15-hostNetwork-03-expanded-port-distribution.yaml
index 92886f058..1e9add242 100644
--- a/docs/chi-examples/15-hostNetwork-03-expanded-port-distribution.yaml
+++ b/docs/chi-examples/15-hostNetwork-03-expanded-port-distribution.yaml
@@ -41,4 +41,4 @@ spec:
       dnsPolicy: ClusterFirstWithHostNet
       containers:
         - name: clickhouse
-          image: clickhouse/clickhouse-server:23.8
+          image: clickhouse/clickhouse-server:24.8
diff --git a/docs/chi-examples/15-hostNetwork-04-simple-fixed-replicas.yaml b/docs/chi-examples/15-hostNetwork-04-simple-fixed-replicas.yaml
index bee7d74d4..03df7776e 100644
--- a/docs/chi-examples/15-hostNetwork-04-simple-fixed-replicas.yaml
+++ b/docs/chi-examples/15-hostNetwork-04-simple-fixed-replicas.yaml
@@ -50,4 +50,4 @@ spec:
       dnsPolicy: ClusterFirstWithHostNet
       containers:
         - name: clickhouse
-          image: clickhouse/clickhouse-server:23.8
+          image: clickhouse/clickhouse-server:24.8
diff --git a/docs/chi-examples/15-hostNetwork-05-expanded-fixed-replicas.yaml b/docs/chi-examples/15-hostNetwork-05-expanded-fixed-replicas.yaml
index e6ba0cb72..fae71301f 100644
--- a/docs/chi-examples/15-hostNetwork-05-expanded-fixed-replicas.yaml
+++ b/docs/chi-examples/15-hostNetwork-05-expanded-fixed-replicas.yaml
@@ -51,4 +51,4 @@ spec:
       dnsPolicy: ClusterFirstWithHostNet
       containers:
         - name: clickhouse
-          image: clickhouse/clickhouse-server:23.8
+          image: clickhouse/clickhouse-server:24.8
diff --git a/docs/chi-examples/17-monitoring-cluster-01.yaml b/docs/chi-examples/17-monitoring-cluster-01.yaml
index 149ab682d..eaa1e2050 100644
--- a/docs/chi-examples/17-monitoring-cluster-01.yaml
+++ b/docs/chi-examples/17-monitoring-cluster-01.yaml
@@ -21,7 +21,7 @@ spec:
     spec:
       containers:
         - name: clickhouse-pod
-          image: clickhouse/clickhouse-server:23.8
+          image: clickhouse/clickhouse-server:24.8
          volumeMounts:
            - name: clickhouse-storage-template
              mountPath: /var/lib/clickhouse
diff --git a/docs/chi-examples/19-pod-generate-name.yaml b/docs/chi-examples/19-pod-generate-name.yaml
index a3889d8b8..f14f50311 100644
--- a/docs/chi-examples/19-pod-generate-name.yaml
+++ b/docs/chi-examples/19-pod-generate-name.yaml
@@ -18,7 +18,7 @@ spec:
     spec:
       containers:
         - name: clickhouse-pod
-          image: clickhouse/clickhouse-server:23.8
+          image: clickhouse/clickhouse-server:24.8
          volumeMounts:
            - name: clickhouse-storage-template
              mountPath: /var/lib/clickhouse
diff --git a/docs/chi-examples/22-secure-ssl-01-files-plaintext.yaml b/docs/chi-examples/22-secure-ssl-01-files-plaintext.yaml
index 3a3f8c996..f5ec7c04b 100644
--- a/docs/chi-examples/22-secure-ssl-01-files-plaintext.yaml
+++ b/docs/chi-examples/22-secure-ssl-01-files-plaintext.yaml
@@ -12,7 +12,7 @@ spec:
       spec:
         containers:
           - name: clickhouse
-            image: altinity/clickhouse-server:23.8.8.21.altinitystable
+            image: clickhouse/clickhouse-server:latest
             imagePullPolicy: IfNotPresent
   configuration:
     clusters:
@@ -134,7 +134,7 @@ metadata:
 spec:
   containers:
     - name: clickhouse-client
-      image: clickhouse/clickhouse-server:23.8
+      image: clickhouse/clickhouse-server:latest
       command: [ "/bin/sh", "-c", "sleep 3600" ]
       volumeMounts:
         - name: config
diff --git a/docs/chi-examples/22-secure-ssl-02-files-secret-ref.yaml b/docs/chi-examples/22-secure-ssl-02-files-secret-ref.yaml
index a1ff47d98..2bd9e7670 100644
--- a/docs/chi-examples/22-secure-ssl-02-files-secret-ref.yaml
+++ b/docs/chi-examples/22-secure-ssl-02-files-secret-ref.yaml
@@ -85,7 +85,7 @@ spec:
       spec:
         containers:
           - name: clickhouse
-            image: altinity/clickhouse-server:23.8.8.21.altinitystable
+            image: clickhouse/clickhouse-server:latest
             imagePullPolicy: IfNotPresent
   configuration:
     clusters:
@@ -156,7 +156,7 @@ metadata:
 spec:
   containers:
     - name: clickhouse-client
-      image: clickhouse/clickhouse-server:23.8
+      image: clickhouse/clickhouse-server:latest
       command: [ "/bin/sh", "-c", "sleep 3600" ]
       volumeMounts:
         - name: config
diff --git a/docs/chi-examples/22-secure-ssl-03-files-multi-secrets-ref.yaml b/docs/chi-examples/22-secure-ssl-03-files-multi-secrets-ref.yaml
index 40d23340e..9ab32bd11 100644
--- a/docs/chi-examples/22-secure-ssl-03-files-multi-secrets-ref.yaml
+++ b/docs/chi-examples/22-secure-ssl-03-files-multi-secrets-ref.yaml
@@ -99,7 +99,7 @@ spec:
       spec:
         containers:
           - name: clickhouse
-            image: altinity/clickhouse-server:23.8.8.21.altinitystable
+            image: clickhouse/clickhouse-server:latest
             imagePullPolicy: IfNotPresent
   configuration:
     clusters:
@@ -170,7 +170,7 @@ metadata:
 spec:
   containers:
     - name: clickhouse-client
-      image: clickhouse/clickhouse-server:23.8
+      image: clickhouse/clickhouse-server:latest
       command: [ "/bin/sh", "-c", "sleep 3600" ]
       volumeMounts:
         - name: config
diff --git a/docs/chi-examples/99-clickhouseinstallation-max.yaml b/docs/chi-examples/99-clickhouseinstallation-max.yaml
index 29569903d..ec92864c4 100644
--- a/docs/chi-examples/99-clickhouseinstallation-max.yaml
+++ b/docs/chi-examples/99-clickhouseinstallation-max.yaml
@@ -569,7 +569,7 @@ spec:
       spec:
         containers:
           - name: clickhouse
-            image: clickhouse/clickhouse-server:23.8
+            image: clickhouse/clickhouse-server:24.8
             volumeMounts:
               - name: default-volume-claim
                 mountPath: /var/lib/clickhouse
@@ -581,7 +581,7 @@ spec:
               memory: "64Mi"
               cpu: "100m"
           - name: clickhouse-log
-            image: clickhouse/clickhouse-server:23.8
+            image: clickhouse/clickhouse-server:24.8
             command:
               - "/bin/sh"
               - "-c"
@@ -599,7 +599,7 @@ spec:
       spec:
         containers:
           - name: clickhouse
-            image: clickhouse/clickhouse-server:23.8
+            image: clickhouse/clickhouse-server:24.8
             volumeMounts:
               - name: default-volume-claim
                 mountPath: /var/lib/clickhouse
@@ -611,7 +611,7 @@ spec:
               memory: "64Mi"
               cpu: "100m"
           - name: clickhouse-log
-            image: clickhouse/clickhouse-server:23.8
+            image: clickhouse/clickhouse-server:24.8
             command:
               - "/bin/sh"
               - "-c"
diff --git a/docs/chi-examples/evolution/01-persistent-volume.yaml b/docs/chi-examples/evolution/01-persistent-volume.yaml
index 
c5b162fa2..fe2f5bd59 100644 --- a/docs/chi-examples/evolution/01-persistent-volume.yaml +++ b/docs/chi-examples/evolution/01-persistent-volume.yaml @@ -19,7 +19,7 @@ spec: spec: containers: - name: clickhouse - image: clickhouse/clickhouse-server:23.8 + image: clickhouse/clickhouse-server:24.8 volumeMounts: - name: storage-vc-template mountPath: /var/lib/clickhouse diff --git a/docs/chi-examples/evolution/02-introduce-replication.yaml b/docs/chi-examples/evolution/02-introduce-replication.yaml index 6f14bd9f7..1910a3dfc 100644 --- a/docs/chi-examples/evolution/02-introduce-replication.yaml +++ b/docs/chi-examples/evolution/02-introduce-replication.yaml @@ -22,7 +22,7 @@ spec: spec: containers: - name: clickhouse - image: clickhouse/clickhouse-server:23.8 + image: clickhouse/clickhouse-server:24.8 volumeMounts: - name: storage-vc-template mountPath: /var/lib/clickhouse diff --git a/docs/chi-examples/evolution/03-introduce-more-shards-and-zones.yaml b/docs/chi-examples/evolution/03-introduce-more-shards-and-zones.yaml index 7a6947e0b..3e2aa57fe 100644 --- a/docs/chi-examples/evolution/03-introduce-more-shards-and-zones.yaml +++ b/docs/chi-examples/evolution/03-introduce-more-shards-and-zones.yaml @@ -28,7 +28,7 @@ spec: spec: containers: - name: clickhouse - image: clickhouse/clickhouse-server:23.8 + image: clickhouse/clickhouse-server:24.8 volumeMounts: - name: storage-vc-template mountPath: /var/lib/clickhouse diff --git a/docs/chi-examples/evolution/04-update-introduce-canary.yaml b/docs/chi-examples/evolution/04-update-introduce-canary.yaml index 1bcaca634..ff5d59c65 100644 --- a/docs/chi-examples/evolution/04-update-introduce-canary.yaml +++ b/docs/chi-examples/evolution/04-update-introduce-canary.yaml @@ -21,7 +21,7 @@ spec: replicas: - name: "1" templates: - podTemplate: pod-template-with-volume-23.3 + podTemplate: pod-template-with-volume-24.3 templates: podTemplates: @@ -35,12 +35,12 @@ spec: spec: containers: - name: clickhouse - image: clickhouse/clickhouse-server:23.3 + image: clickhouse/clickhouse-server:24.8 volumeMounts: - name: storage-vc-template mountPath: /var/lib/clickhouse - - name: pod-template-with-volume-23.3 + - name: pod-template-with-volume-24.3 zone: key: "clickhouse" values: @@ -50,7 +50,7 @@ spec: spec: containers: - name: clickhouse - image: clickhouse/clickhouse-server:23.8 + image: clickhouse/clickhouse-server:24.3 volumeMounts: - name: storage-vc-template mountPath: /var/lib/clickhouse diff --git a/docs/chi-examples/evolution/05-update-propagate-update.yaml b/docs/chi-examples/evolution/05-update-propagate-update.yaml index 7a6947e0b..3e2aa57fe 100644 --- a/docs/chi-examples/evolution/05-update-propagate-update.yaml +++ b/docs/chi-examples/evolution/05-update-propagate-update.yaml @@ -28,7 +28,7 @@ spec: spec: containers: - name: clickhouse - image: clickhouse/clickhouse-server:23.8 + image: clickhouse/clickhouse-server:24.8 volumeMounts: - name: storage-vc-template mountPath: /var/lib/clickhouse diff --git a/docs/chit-examples/103-templates.yaml b/docs/chit-examples/103-templates.yaml index 721bd8541..97d73a4a4 100644 --- a/docs/chit-examples/103-templates.yaml +++ b/docs/chit-examples/103-templates.yaml @@ -9,7 +9,7 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:23.8 + image: clickhouse/clickhouse-server:24.8 ports: - name: http containerPort: 8123 diff --git a/docs/custom_resource_explained.md b/docs/custom_resource_explained.md index 648dbf079..ceb7895f5 100644 --- a/docs/custom_resource_explained.md +++ 
b/docs/custom_resource_explained.md @@ -568,7 +568,7 @@ with additional sections, such as: spec: containers: - name: clickhouse - image: clickhouse/clickhouse-server:23.8 + image: clickhouse/clickhouse-server:24.8 volumeMounts: - name: default-volume-claim mountPath: /var/lib/clickhouse diff --git a/docs/operator_configuration.md b/docs/operator_configuration.md index d7c56a3c3..73c11ec4b 100644 --- a/docs/operator_configuration.md +++ b/docs/operator_configuration.md @@ -149,7 +149,7 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:23.8 + image: clickhouse/clickhouse-server:24.8 ``` Template needs to be deployed to some namespace, and later on used in the installation: diff --git a/docs/quick_start.md b/docs/quick_start.md index 6e650a9f1..d5e4efd87 100644 --- a/docs/quick_start.md +++ b/docs/quick_start.md @@ -285,7 +285,7 @@ spec: spec: containers: - name: clickhouse - image: clickhouse/clickhouse-server:23.8 + image: clickhouse/clickhouse-server:24.8 volumeMounts: - name: data-storage-vc-template mountPath: /var/lib/clickhouse diff --git a/docs/replication_setup.md b/docs/replication_setup.md index e5096a6a3..27a59c965 100644 --- a/docs/replication_setup.md +++ b/docs/replication_setup.md @@ -48,7 +48,7 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:23.8 + image: clickhouse/clickhouse-server:24.8 ``` diff --git a/docs/security_hardening.md b/docs/security_hardening.md index 67614ae12..770b8400d 100644 --- a/docs/security_hardening.md +++ b/docs/security_hardening.md @@ -257,7 +257,7 @@ spec: spec: containers: - name: clickhouse - image: altinity/clickhouse-server:23.3.8.22.altinitystable + image: altinity/clickhouse-server:24.3.12.76.altinitystable env: - name: AWS_ACCESS_KEY_ID valueFrom: @@ -512,7 +512,7 @@ spec: - name: default containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:23.8 + image: altinity/clickhouse-server:24.3.12.76.altinitystable ports: - name: http containerPort: 8123 diff --git a/pkg/controller/chi/worker-migrator.go b/pkg/controller/chi/worker-migrator.go index 4d3a5c014..c0f069937 100644 --- a/pkg/controller/chi/worker-migrator.go +++ b/pkg/controller/chi/worker-migrator.go @@ -110,7 +110,7 @@ func (w *worker) migrateTables(ctx context.Context, host *api.Host, opts ...*mig WithEvent(host.GetCR(), a.EventActionCreate, a.EventReasonCreateFailed). WithAction(host.GetCR()). M(host).F(). 
- Error("ERROR add tables added successfully on shard/host:%d/%d cluster:%s err:%v", + Error("ERROR adding tables on shard/host:%d/%d cluster:%s err:%v", host.Runtime.Address.ShardIndex, host.Runtime.Address.ReplicaIndex, host.Runtime.Address.ClusterName, err) } return err diff --git a/tests/README.md b/tests/README.md index 62b071d22..a5dd1fb46 100644 --- a/tests/README.md +++ b/tests/README.md @@ -10,7 +10,7 @@ To execute tests, you will need: * TestFlows Python library (`pip3 install -r ./tests/image/requirements.txt`) * To run tests in docker container (approximately 2 times slower, but does not require any additional configuration): - `docker` - - `docker-compose` + - `docker compose` - `python3` * To run tests natively on your machine: - `kubectl` @@ -37,7 +37,7 @@ To execute the test suite (that currently involves only operator tests, not test ```bash pip3 install -U -r ./tests/image/requirements.txt docker pull registry.gitlab.com/altinity-public/container-images/clickhouse-operator-test-runner:latest -COMPOSE_HTTP_TIMEOUT=1800 python3 ./tests/regression.py --only "/regression/e2e.test_operator/*" +COMPOSE_HTTP_TIMEOUT=1800 python3 ./tests/regression.py --only "/regression/e2e?test_operator/*" ``` To execute tests natively (not in docker), you need to add `--native` parameter. @@ -47,7 +47,7 @@ Tests running in parallel by default, to run it consistently, add `--parallel of If you need only one certain test, you may execute ```bash -COMPOSE_HTTP_TIMEOUT=1800 python3 ./tests/regression.py --only "/regression/e2e.test_operator/test_009*" +COMPOSE_HTTP_TIMEOUT=1800 python3 ./tests/regression.py --only "/regression/e2e?test_operator/test_009*" ``` where `009` may be substituted by the number of the test you need. Tests --- numbers and names correspondence may be found in `tests/regression.py` and `tests/test_*.py` source code files. 
diff --git a/tests/docker-compose/docker-compose.yml b/tests/docker-compose/docker-compose.yml index dbd3ba276..578565660 100644 --- a/tests/docker-compose/docker-compose.yml +++ b/tests/docker-compose/docker-compose.yml @@ -21,7 +21,7 @@ services: - NET_ADMIN # dummy service which does nothing, but allows to postpone - # 'docker-compose up -d' till all dependecies will go healthy + # 'docker compose up -d' until all dependencies become healthy all_services_ready: image: hello-world privileged: true
diff --git a/tests/e2e/clickhouse.py b/tests/e2e/clickhouse.py index 0c4fe273e..eb467ab52 100644 --- a/tests/e2e/clickhouse.py +++ b/tests/e2e/clickhouse.py @@ -31,7 +31,7 @@ def query( return kubectl.launch( f"exec {pod_name} -n {current().context.test_namespace} -c {container}" f" --" - f" clickhouse-client -mn -h {host} --port={port} {user_str} {pwd_str} {advanced_params}" + f" clickhouse-client -mn --receive_timeout={timeout} -h {host} --port={port} {user_str} {pwd_str} {advanced_params}" f' --query="{sql}"' f" 2>&1", timeout=timeout, @@ -42,7 +42,7 @@ return kubectl.launch( f"exec {pod_name} -n {current().context.test_namespace} -c {container}" f" -- " f"clickhouse-client -mn -h {host} --port={port} {user_str} {pwd_str} {advanced_params}" + f"clickhouse-client -mn --receive_timeout={timeout} -h {host} --port={port} {user_str} {pwd_str} {advanced_params}" f'--query="{sql}"', timeout=timeout, ns=current().context.test_namespace,
diff --git a/tests/e2e/manifests/chi/test-005-acm.yaml b/tests/e2e/manifests/chi/test-005-acm.yaml index 2d925213c..1bbe4b97a 100644 --- a/tests/e2e/manifests/chi/test-005-acm.yaml +++ b/tests/e2e/manifests/chi/test-005-acm.yaml @@ -13,7 +13,7 @@ spec: fsGroup: 101 containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.8.5.115 + image: clickhouse/clickhouse-server:24.8 ports: - name: http containerPort: 8123
diff --git a/tests/e2e/manifests/chi/test-006-ch-upgrade-1.yaml b/tests/e2e/manifests/chi/test-006-ch-upgrade-1.yaml index 288d048c8..e2e10a656 100644 --- a/tests/e2e/manifests/chi/test-006-ch-upgrade-1.yaml +++ b/tests/e2e/manifests/chi/test-006-ch-upgrade-1.yaml @@ -9,7 +9,7 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:23.8 + image: clickhouse/clickhouse-server:24.3 defaults: templates: podTemplate: clickhouse-old
diff --git a/tests/e2e/manifests/chi/test-006-ch-upgrade-2.yaml b/tests/e2e/manifests/chi/test-006-ch-upgrade-2.yaml index 93e42d0c5..6d75f5d11 100644 --- a/tests/e2e/manifests/chi/test-006-ch-upgrade-2.yaml +++ b/tests/e2e/manifests/chi/test-006-ch-upgrade-2.yaml @@ -9,7 +9,7 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3 + image: clickhouse/clickhouse-server:24.8 defaults: templates: podTemplate: clickhouse-new
diff --git a/tests/e2e/manifests/chi/test-006-ch-upgrade-3.yaml b/tests/e2e/manifests/chi/test-006-ch-upgrade-3.yaml index 042318b0a..93e42d0c5 100644 --- a/tests/e2e/manifests/chi/test-006-ch-upgrade-3.yaml +++ b/tests/e2e/manifests/chi/test-006-ch-upgrade-3.yaml @@ -9,7 +9,7 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:23.8 + image: clickhouse/clickhouse-server:24.3 defaults: templates: podTemplate: clickhouse-new
diff --git a/tests/e2e/manifests/chi/test-008-operator-restart-3-1.yaml b/tests/e2e/manifests/chi/test-008-operator-restart-3-1.yaml index 1a6d06eb9..0da04117c 100644 --- a/tests/e2e/manifests/chi/test-008-operator-restart-3-1.yaml +++ 
b/tests/e2e/manifests/chi/test-008-operator-restart-3-1.yaml @@ -30,4 +30,4 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:23.8 \ No newline at end of file + image: clickhouse/clickhouse-server:24.8 \ No newline at end of file diff --git a/tests/e2e/manifests/chi/test-009-operator-upgrade-2.yaml b/tests/e2e/manifests/chi/test-009-operator-upgrade-2.yaml index 4c417e6b9..ce101e31d 100644 --- a/tests/e2e/manifests/chi/test-009-operator-upgrade-2.yaml +++ b/tests/e2e/manifests/chi/test-009-operator-upgrade-2.yaml @@ -64,7 +64,7 @@ spec: spec: containers: - name: clickhouse-pod - image: altinity/clickhouse-server:23.8.11.29.altinitystable + image: clickhouse/clickhouse-server:24.8 ports: - name: http containerPort: 8123 diff --git a/tests/e2e/manifests/chi/test-011-secured-default-2.yaml b/tests/e2e/manifests/chi/test-011-secured-default-2.yaml index 47682f6c0..a150e3693 100644 --- a/tests/e2e/manifests/chi/test-011-secured-default-2.yaml +++ b/tests/e2e/manifests/chi/test-011-secured-default-2.yaml @@ -6,6 +6,8 @@ spec: useTemplates: - name: clickhouse-version configuration: + profiles: + readonly/receive_timeout: 60 users: default/profile: readonly clusters: diff --git a/tests/e2e/manifests/chi/test-015-host-network.yaml b/tests/e2e/manifests/chi/test-015-host-network.yaml index be87e8d38..85318dac5 100644 --- a/tests/e2e/manifests/chi/test-015-host-network.yaml +++ b/tests/e2e/manifests/chi/test-015-host-network.yaml @@ -41,7 +41,7 @@ spec: hostNetwork: true containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3 + image: clickhouse/clickhouse-server:24.8 # dnsPolicy: ClusterFirstWithHostNet diff --git a/tests/e2e/manifests/chi/test-017-multi-version.yaml b/tests/e2e/manifests/chi/test-017-multi-version.yaml index b6998a1ac..9301ed2fd 100644 --- a/tests/e2e/manifests/chi/test-017-multi-version.yaml +++ b/tests/e2e/manifests/chi/test-017-multi-version.yaml @@ -9,25 +9,25 @@ metadata: spec: templates: podTemplates: - - name: v23.3 + - name: v24.3 spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:23.3 - - name: v23.8 + image: clickhouse/clickhouse-server:24.3 + - name: v24.8 spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:23.8 + image: clickhouse/clickhouse-server:24.8 configuration: clusters: - name: default layout: shards: - templates: - podTemplate: v23.3 + podTemplate: v24.3 - templates: - podTemplate: v23.8 + podTemplate: v24.8 files: users.d/remove_database_ordinary.xml: | diff --git a/tests/e2e/manifests/chi/test-020-1-multi-volume.yaml b/tests/e2e/manifests/chi/test-020-1-multi-volume.yaml index cf6b9f5dd..1ee786a53 100644 --- a/tests/e2e/manifests/chi/test-020-1-multi-volume.yaml +++ b/tests/e2e/manifests/chi/test-020-1-multi-volume.yaml @@ -38,7 +38,7 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3 + image: clickhouse/clickhouse-server:24.8 volumeMounts: - name: disk1 mountPath: /var/lib/clickhouse diff --git a/tests/e2e/manifests/chi/test-020-2-multi-volume.yaml b/tests/e2e/manifests/chi/test-020-2-multi-volume.yaml index 8afca36f1..15d8ada4a 100644 --- a/tests/e2e/manifests/chi/test-020-2-multi-volume.yaml +++ b/tests/e2e/manifests/chi/test-020-2-multi-volume.yaml @@ -38,7 +38,7 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3 + image: clickhouse/clickhouse-server:24.8 volumeMounts: - name: disk1 mountPath: /var/lib/clickhouse diff --git 
a/tests/e2e/manifests/chi/test-021-1-rescale-volume-01.yaml b/tests/e2e/manifests/chi/test-021-1-rescale-volume-01.yaml index cf53f226e..daeafb4c9 100644 --- a/tests/e2e/manifests/chi/test-021-1-rescale-volume-01.yaml +++ b/tests/e2e/manifests/chi/test-021-1-rescale-volume-01.yaml @@ -27,7 +27,7 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3 + image: clickhouse/clickhouse-server:24.8 volumeMounts: - name: disk1 mountPath: /var/lib/clickhouse diff --git a/tests/e2e/manifests/chi/test-021-1-rescale-volume-02-enlarge-disk.yaml b/tests/e2e/manifests/chi/test-021-1-rescale-volume-02-enlarge-disk.yaml index 8ef614b77..87bf2db87 100644 --- a/tests/e2e/manifests/chi/test-021-1-rescale-volume-02-enlarge-disk.yaml +++ b/tests/e2e/manifests/chi/test-021-1-rescale-volume-02-enlarge-disk.yaml @@ -27,7 +27,7 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3 + image: clickhouse/clickhouse-server:24.8 volumeMounts: - name: disk1 mountPath: /var/lib/clickhouse diff --git a/tests/e2e/manifests/chi/test-021-1-rescale-volume-03-add-disk.yaml b/tests/e2e/manifests/chi/test-021-1-rescale-volume-03-add-disk.yaml index 16556af84..01b682ce0 100644 --- a/tests/e2e/manifests/chi/test-021-1-rescale-volume-03-add-disk.yaml +++ b/tests/e2e/manifests/chi/test-021-1-rescale-volume-03-add-disk.yaml @@ -38,7 +38,7 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3 + image: clickhouse/clickhouse-server:24.8 volumeMounts: - name: disk1 mountPath: /var/lib/clickhouse diff --git a/tests/e2e/manifests/chi/test-021-1-rescale-volume-04-decrease-disk.yaml b/tests/e2e/manifests/chi/test-021-1-rescale-volume-04-decrease-disk.yaml index 024691d94..277ac1844 100644 --- a/tests/e2e/manifests/chi/test-021-1-rescale-volume-04-decrease-disk.yaml +++ b/tests/e2e/manifests/chi/test-021-1-rescale-volume-04-decrease-disk.yaml @@ -38,7 +38,7 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3 + image: clickhouse/clickhouse-server:24.8 volumeMounts: - name: disk1 mountPath: /var/lib/clickhouse diff --git a/tests/e2e/manifests/chi/test-021-2-rescale-volume-01.yaml b/tests/e2e/manifests/chi/test-021-2-rescale-volume-01.yaml index affe2418b..965ea054d 100644 --- a/tests/e2e/manifests/chi/test-021-2-rescale-volume-01.yaml +++ b/tests/e2e/manifests/chi/test-021-2-rescale-volume-01.yaml @@ -29,7 +29,7 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3 + image: clickhouse/clickhouse-server:24.8 volumeMounts: - name: disk1 mountPath: /var/lib/clickhouse diff --git a/tests/e2e/manifests/chi/test-021-2-rescale-volume-02-enlarge-disk.yaml b/tests/e2e/manifests/chi/test-021-2-rescale-volume-02-enlarge-disk.yaml index 21e22a5e7..c4d13f3ac 100644 --- a/tests/e2e/manifests/chi/test-021-2-rescale-volume-02-enlarge-disk.yaml +++ b/tests/e2e/manifests/chi/test-021-2-rescale-volume-02-enlarge-disk.yaml @@ -29,7 +29,7 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3 + image: clickhouse/clickhouse-server:24.8 volumeMounts: - name: disk1 mountPath: /var/lib/clickhouse diff --git a/tests/e2e/manifests/chi/test-021-2-rescale-volume-03-add-disk.yaml b/tests/e2e/manifests/chi/test-021-2-rescale-volume-03-add-disk.yaml index f51b35eb8..31b73a305 100644 --- a/tests/e2e/manifests/chi/test-021-2-rescale-volume-03-add-disk.yaml +++ b/tests/e2e/manifests/chi/test-021-2-rescale-volume-03-add-disk.yaml @@ -40,7 +40,7 @@ 
spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3 + image: clickhouse/clickhouse-server:24.8 volumeMounts: - name: disk1 mountPath: /var/lib/clickhouse diff --git a/tests/e2e/manifests/chi/test-021-2-rescale-volume-04-decrease-disk.yaml b/tests/e2e/manifests/chi/test-021-2-rescale-volume-04-decrease-disk.yaml index 8b482d046..f74f04bd5 100644 --- a/tests/e2e/manifests/chi/test-021-2-rescale-volume-04-decrease-disk.yaml +++ b/tests/e2e/manifests/chi/test-021-2-rescale-volume-04-decrease-disk.yaml @@ -40,7 +40,7 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3 + image: clickhouse/clickhouse-server:24.8 volumeMounts: - name: disk1 mountPath: /var/lib/clickhouse diff --git a/tests/e2e/manifests/chi/test-022-broken-image.yaml b/tests/e2e/manifests/chi/test-022-broken-image.yaml index 9c59f7c48..9a4d03b97 100644 --- a/tests/e2e/manifests/chi/test-022-broken-image.yaml +++ b/tests/e2e/manifests/chi/test-022-broken-image.yaml @@ -5,14 +5,14 @@ metadata: spec: defaults: templates: - podTemplate: v20.3 + podTemplate: broken-image templates: podTemplates: - - name: v20.3 + - name: broken-image spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3-broken + image: clickhouse/clickhouse-server:24.8-broken configuration: clusters: - name: default diff --git a/tests/e2e/manifests/chi/test-023-auto-templates.yaml b/tests/e2e/manifests/chi/test-023-auto-templates.yaml index 503a85e1e..1a4c7e8b3 100644 --- a/tests/e2e/manifests/chi/test-023-auto-templates.yaml +++ b/tests/e2e/manifests/chi/test-023-auto-templates.yaml @@ -14,4 +14,4 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3 \ No newline at end of file + image: clickhouse/clickhouse-server:24.8 \ No newline at end of file diff --git a/tests/e2e/manifests/chi/test-024-template-annotations-2.yaml b/tests/e2e/manifests/chi/test-024-template-annotations-2.yaml index e4dda903f..8fc8d1f45 100644 --- a/tests/e2e/manifests/chi/test-024-template-annotations-2.yaml +++ b/tests/e2e/manifests/chi/test-024-template-annotations-2.yaml @@ -16,7 +16,7 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3 + image: clickhouse/clickhouse-server:24.8 volumeClaimTemplates: - name: default-volumeclaim-template reclaimPolicy: Delete diff --git a/tests/e2e/manifests/chi/test-024-template-annotations.yaml b/tests/e2e/manifests/chi/test-024-template-annotations.yaml index cd8dd2f63..d6afa283d 100644 --- a/tests/e2e/manifests/chi/test-024-template-annotations.yaml +++ b/tests/e2e/manifests/chi/test-024-template-annotations.yaml @@ -14,7 +14,7 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3 + image: clickhouse/clickhouse-server:24.8 volumeClaimTemplates: - name: default-volumeclaim-template reclaimPolicy: Delete diff --git a/tests/e2e/manifests/chi/test-025-rescaling-2.yaml b/tests/e2e/manifests/chi/test-025-rescaling-2.yaml index 861e176c1..439fa5d0f 100644 --- a/tests/e2e/manifests/chi/test-025-rescaling-2.yaml +++ b/tests/e2e/manifests/chi/test-025-rescaling-2.yaml @@ -27,7 +27,7 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3 + image: clickhouse/clickhouse-server:24.8 command: - "/bin/bash" - "-c" diff --git a/tests/e2e/manifests/chi/test-025-rescaling.yaml b/tests/e2e/manifests/chi/test-025-rescaling.yaml index 0c2a2226d..5d6d7f1b8 100644 --- a/tests/e2e/manifests/chi/test-025-rescaling.yaml 
+++ b/tests/e2e/manifests/chi/test-025-rescaling.yaml @@ -27,7 +27,7 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3 + image: clickhouse/clickhouse-server:24.8 command: - "/bin/bash" - "-c" diff --git a/tests/e2e/manifests/chi/test-026-mixed-replicas.yaml b/tests/e2e/manifests/chi/test-026-mixed-replicas.yaml index 1a4a90f10..e97de4946 100644 --- a/tests/e2e/manifests/chi/test-026-mixed-replicas.yaml +++ b/tests/e2e/manifests/chi/test-026-mixed-replicas.yaml @@ -54,7 +54,7 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3 + image: clickhouse/clickhouse-server:24.8 - name: multi-volume spec: securityContext: @@ -63,7 +63,7 @@ spec: fsGroup: 101 containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3 + image: clickhouse/clickhouse-server:24.8 volumeMounts: - name: disk1 mountPath: /var/lib/clickhouse diff --git a/tests/e2e/manifests/chi/test-029-distribution.yaml b/tests/e2e/manifests/chi/test-029-distribution.yaml index cc1001a3d..2fc992802 100644 --- a/tests/e2e/manifests/chi/test-029-distribution.yaml +++ b/tests/e2e/manifests/chi/test-029-distribution.yaml @@ -9,7 +9,7 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3 + image: clickhouse/clickhouse-server:24.8 podDistribution: - scope: ClickHouseInstallation type: ClickHouseAntiAffinity @@ -17,7 +17,7 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3 + image: clickhouse/clickhouse-server:24.8 podDistribution: - scope: ClickHouseInstallation type: ReplicaAntiAffinity diff --git a/tests/e2e/manifests/chi/test-032-rescaling-2.yaml b/tests/e2e/manifests/chi/test-032-rescaling-2.yaml index 2c168710f..2b2e86a83 100644 --- a/tests/e2e/manifests/chi/test-032-rescaling-2.yaml +++ b/tests/e2e/manifests/chi/test-032-rescaling-2.yaml @@ -30,4 +30,4 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:23.8 + image: clickhouse/clickhouse-server:24.8 diff --git a/tests/e2e/manifests/chi/test-032-rescaling.yaml b/tests/e2e/manifests/chi/test-032-rescaling.yaml index cd07fd95c..2b2e86a83 100644 --- a/tests/e2e/manifests/chi/test-032-rescaling.yaml +++ b/tests/e2e/manifests/chi/test-032-rescaling.yaml @@ -30,4 +30,4 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3 + image: clickhouse/clickhouse-server:24.8 diff --git a/tests/e2e/manifests/chi/test-034-client.yaml b/tests/e2e/manifests/chi/test-034-client.yaml index 4300cf9c3..24451947e 100644 --- a/tests/e2e/manifests/chi/test-034-client.yaml +++ b/tests/e2e/manifests/chi/test-034-client.yaml @@ -24,7 +24,7 @@ metadata: spec: containers: - name: clickhouse-client - image: altinity/clickhouse-server:23.8.8.21.altinitystable + image: clickhouse/clickhouse-server:24.8 command: [ "/bin/sh", "-c", "sleep 3600" ] volumeMounts: - name: client-config diff --git a/tests/e2e/manifests/chi/test-041-secure-zookeeper.yaml b/tests/e2e/manifests/chi/test-041-secure-zookeeper.yaml index 1c09df419..8712ec5fc 100644 --- a/tests/e2e/manifests/chi/test-041-secure-zookeeper.yaml +++ b/tests/e2e/manifests/chi/test-041-secure-zookeeper.yaml @@ -12,7 +12,7 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3 + image: clickhouse/clickhouse-server:24.8 imagePullPolicy: IfNotPresent command: - /bin/bash diff --git a/tests/e2e/manifests/chi/test-043-0-logs-container-customizing.yaml 
b/tests/e2e/manifests/chi/test-043-0-logs-container-customizing.yaml index e05d37bd2..ffb3d2a6d 100644 --- a/tests/e2e/manifests/chi/test-043-0-logs-container-customizing.yaml +++ b/tests/e2e/manifests/chi/test-043-0-logs-container-customizing.yaml @@ -9,7 +9,7 @@ spec: spec: containers: - name: clickhouse - image: clickhouse/clickhouse-server:24.3 + image: clickhouse/clickhouse-server:24.8 - name: clickhouse-log image: registry.access.redhat.com/ubi8/ubi-minimal:latest command: diff --git a/tests/e2e/manifests/chi/test-043-1-logs-container-customizing.yaml b/tests/e2e/manifests/chi/test-043-1-logs-container-customizing.yaml index b439c8e04..383402afa 100644 --- a/tests/e2e/manifests/chi/test-043-1-logs-container-customizing.yaml +++ b/tests/e2e/manifests/chi/test-043-1-logs-container-customizing.yaml @@ -9,7 +9,7 @@ spec: spec: containers: - name: clickhouse - image: clickhouse/clickhouse-server:24.3 + image: clickhouse/clickhouse-server:24.8 volumeClaimTemplates: - name: log-volume-template spec: diff --git a/tests/e2e/manifests/chi/test-044-0-slow-propagation.yaml b/tests/e2e/manifests/chi/test-044-0-slow-propagation.yaml index e073da668..45834783e 100644 --- a/tests/e2e/manifests/chi/test-044-0-slow-propagation.yaml +++ b/tests/e2e/manifests/chi/test-044-0-slow-propagation.yaml @@ -24,4 +24,4 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3 \ No newline at end of file + image: clickhouse/clickhouse-server:24.8 \ No newline at end of file diff --git a/tests/e2e/manifests/chi/test-044-1-slow-propagation.yaml b/tests/e2e/manifests/chi/test-044-1-slow-propagation.yaml index ad5ffae79..6421f019f 100644 --- a/tests/e2e/manifests/chi/test-044-1-slow-propagation.yaml +++ b/tests/e2e/manifests/chi/test-044-1-slow-propagation.yaml @@ -26,12 +26,12 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3 + image: clickhouse/clickhouse-server:24.8 - name: slow-replica spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3 + image: clickhouse/clickhouse-server:24.8 command: - "/bin/bash" - "-c" diff --git a/tests/e2e/manifests/chi/test-044-2-slow-propagation.yaml b/tests/e2e/manifests/chi/test-044-2-slow-propagation.yaml index c76c0137d..c8ce6bec5 100644 --- a/tests/e2e/manifests/chi/test-044-2-slow-propagation.yaml +++ b/tests/e2e/manifests/chi/test-044-2-slow-propagation.yaml @@ -27,12 +27,12 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3 + image: clickhouse/clickhouse-server:24.8 - name: slow-replica spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3 + image: clickhouse/clickhouse-server:24.8 command: - "/bin/bash" - "-c" diff --git a/tests/e2e/manifests/chi/test-046-2-clickhouse-operator-metrics.yaml b/tests/e2e/manifests/chi/test-046-2-clickhouse-operator-metrics.yaml index 05adbe3db..848b1b6f9 100644 --- a/tests/e2e/manifests/chi/test-046-2-clickhouse-operator-metrics.yaml +++ b/tests/e2e/manifests/chi/test-046-2-clickhouse-operator-metrics.yaml @@ -15,7 +15,7 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3-broken + image: clickhouse/clickhouse-server:24.8-broken defaults: templates: podTemplate: clickhouse-new \ No newline at end of file diff --git a/tests/e2e/manifests/chi/test-cluster-for-alerts.yaml b/tests/e2e/manifests/chi/test-cluster-for-alerts.yaml index 02f64aa1b..0aa3e130a 100644 --- a/tests/e2e/manifests/chi/test-cluster-for-alerts.yaml +++ 
b/tests/e2e/manifests/chi/test-cluster-for-alerts.yaml @@ -18,6 +18,14 @@ spec: prometheus/metrics: true prometheus/events: true prometheus/asynchronous_metrics: true + # tune for low memory + mark_cache_size: 67108864 + merge_tree/parts_to_throw_insert: 300 + merge_tree/parts_to_delay_insert: 150 +# merge_tree/merge_max_block_size: 1024 +# merge_tree/max_bytes_to_merge_at_max_space_in_pool: 1073741824 +# merge_tree/number_of_free_entries_in_pool_to_lower_max_size_of_merge: 0 +# background_schedule_pool_size: 128 zookeeper: nodes: diff --git a/tests/e2e/manifests/chit/tpl-clickhouse-alerts.yaml b/tests/e2e/manifests/chit/tpl-clickhouse-alerts.yaml index 15875565f..645548681 100644 --- a/tests/e2e/manifests/chit/tpl-clickhouse-alerts.yaml +++ b/tests/e2e/manifests/chit/tpl-clickhouse-alerts.yaml @@ -17,5 +17,5 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:24.3 + image: clickhouse/clickhouse-server:latest imagePullPolicy: Always diff --git a/tests/e2e/manifests/chit/tpl-clickhouse-backups-fake.yaml b/tests/e2e/manifests/chit/tpl-clickhouse-backups-fake.yaml index dff9aa0d8..35cb84a80 100644 --- a/tests/e2e/manifests/chit/tpl-clickhouse-backups-fake.yaml +++ b/tests/e2e/manifests/chit/tpl-clickhouse-backups-fake.yaml @@ -21,7 +21,7 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:23.8 + image: clickhouse/clickhouse-server:latest - name: clickhouse-backup image: nginx:latest diff --git a/tests/e2e/manifests/chit/tpl-clickhouse-backups.yaml b/tests/e2e/manifests/chit/tpl-clickhouse-backups.yaml index 1e89ac90a..7e9aec01e 100644 --- a/tests/e2e/manifests/chit/tpl-clickhouse-backups.yaml +++ b/tests/e2e/manifests/chit/tpl-clickhouse-backups.yaml @@ -25,14 +25,15 @@ spec: fsGroup: 101 containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:23.8 + image: clickhouse/clickhouse-server:latest + imagePullPolicy: Always command: - clickhouse-server - --config-file=/etc/clickhouse-server/config.xml - name: clickhouse-backup - image: altinity/clickhouse-backup:2.4.15 - imagePullPolicy: IfNotPresent + image: altinity/clickhouse-backup:latest + imagePullPolicy: Always command: - bash - -xc @@ -49,7 +50,7 @@ spec: - name: BACKUPS_TO_KEEP_REMOTE value: "3" - name: S3_ENDPOINT - value: https://minio.minio + value: http://minio.minio - name: S3_BUCKET value: clickhouse-backup - name: S3_PATH diff --git a/tests/e2e/manifests/chit/tpl-test-031.yaml b/tests/e2e/manifests/chit/tpl-test-031.yaml index d248ba8aa..1f3f4c6da 100644 --- a/tests/e2e/manifests/chit/tpl-test-031.yaml +++ b/tests/e2e/manifests/chit/tpl-test-031.yaml @@ -16,4 +16,4 @@ spec: spec: containers: - name: clickhouse-pod - image: clickhouse/clickhouse-server:23.8 + image: clickhouse/clickhouse-server:24.8 diff --git a/tests/e2e/run_tests_keeper.sh b/tests/e2e/run_tests_keeper.sh index 68318a3ff..1a4ec361e 100755 --- a/tests/e2e/run_tests_keeper.sh +++ b/tests/e2e/run_tests_keeper.sh @@ -7,4 +7,4 @@ export OPERATOR_INSTALL="${OPERATOR_INSTALL:-"yes"}" export IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"Always"}" ONLY="${ONLY:-"*"}" -python3 "$CUR_DIR/../regression.py" --only="/regression/e2e.test_keeper/${ONLY}" --native +python3 "$CUR_DIR/../regression.py" --only="/regression/e2e?test_keeper/${ONLY}" --native diff --git a/tests/e2e/run_tests_local.sh b/tests/e2e/run_tests_local.sh index c9795b31a..9b9fdfb43 100755 --- a/tests/e2e/run_tests_local.sh +++ b/tests/e2e/run_tests_local.sh @@ -124,14 +124,12 @@ fi if [[ ! 
-z "${MINIKUBE_PRELOAD_IMAGES}" ]]; then echo "pre-load images into minikube" IMAGES=" - clickhouse/clickhouse-server:22.3 - clickhouse/clickhouse-server:22.6 - clickhouse/clickhouse-server:22.7 - clickhouse/clickhouse-server:22.8 clickhouse/clickhouse-server:23.3 clickhouse/clickhouse-server:23.8 + clickhouse/clickhouse-server:24.3 + clickhouse/clickhouse-server:24.8 clickhouse/clickhouse-server:latest - altinity/clickhouse-server:22.8.15.25.altinitystable + altinity/clickhouse-server:23.8.16.42.altinitystable docker.io/zookeeper:3.8.4 " for image in ${IMAGES}; do diff --git a/tests/e2e/run_tests_metrics.sh b/tests/e2e/run_tests_metrics.sh index 127cec49c..a167f3c17 100755 --- a/tests/e2e/run_tests_metrics.sh +++ b/tests/e2e/run_tests_metrics.sh @@ -7,4 +7,4 @@ export OPERATOR_INSTALL="${OPERATOR_INSTALL:-"yes"}" export IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"Always"}" ONLY="${ONLY:-"*"}" -python3 "$CUR_DIR/../regression.py" --only="/regression/e2e.test_metrics_exporter/${ONLY}" --native +python3 "$CUR_DIR/../regression.py" --only="/regression/e2e?test_metrics_exporter/${ONLY}" --native diff --git a/tests/e2e/run_tests_operator.sh b/tests/e2e/run_tests_operator.sh index fc52050b6..01520e40e 100755 --- a/tests/e2e/run_tests_operator.sh +++ b/tests/e2e/run_tests_operator.sh @@ -7,10 +7,10 @@ export OPERATOR_INSTALL="${OPERATOR_INSTALL:-"yes"}" export IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"Always"}" ONLY="${ONLY:-"*"}" -python3 "$CUR_DIR/../regression.py" --only="/regression/e2e.test_operator/${ONLY}" -o short --trim-results on --debug --native -#python3 "$CUR_DIR/../regression.py" --only="/regression/e2e.test_operator/${ONLY}" --parallel-pool ${MAX_PARALLEL} -o short --trim-results on --debug --native -#python3 "$CUR_DIR/../regression.py" --only=/regression/e2e.test_operator/* -o short --trim-results on --debug --native --native -#python3 "$CUR_DIR/../regression.py" --only=/regression/e2e.test_operator/* --trim-results on --debug --native --native -#python3 "$CUR_DIR/../regression.py" --only=/regression/e2e.test_operator/test_008_2* --trim-results on --debug --native --native -#python3 "$CUR_DIR/../regression.py" --only=/regression/e2e.test_operator/test_008_2* --trim-results on --debug --native -o short --native -#python3 "$CUR_DIR/../regression.py" --only=/regression/e2e.test_operator/*32* --trim-results on --debug --native -o short --native +python3 "$CUR_DIR/../regression.py" --only="/regression/e2e?test_operator/${ONLY}" -o short --trim-results on --debug --native +#python3 "$CUR_DIR/../regression.py" --only="/regression/e2e?test_operator/${ONLY}" --parallel-pool ${MAX_PARALLEL} -o short --trim-results on --debug --native +#python3 "$CUR_DIR/../regression.py" --only=/regression/e2e?test_operator/* -o short --trim-results on --debug --native --native +#python3 "$CUR_DIR/../regression.py" --only=/regression/e2e?test_operator/* --trim-results on --debug --native --native +#python3 "$CUR_DIR/../regression.py" --only=/regression/e2e?test_operator/test_008_2* --trim-results on --debug --native --native +#python3 "$CUR_DIR/../regression.py" --only=/regression/e2e?test_operator/test_008_2* --trim-results on --debug --native -o short --native +#python3 "$CUR_DIR/../regression.py" --only=/regression/e2e?test_operator/*32* --trim-results on --debug --native -o short --native diff --git a/tests/e2e/run_tests_parallel.sh b/tests/e2e/run_tests_parallel.sh index cebc337cf..a7020b219 100755 --- a/tests/e2e/run_tests_parallel.sh +++ b/tests/e2e/run_tests_parallel.sh @@ -2,69 +2,29 @@ set -e 
CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" pip3 install -r "$CUR_DIR/../image/requirements.txt" -rm -rfv /tmp/test*.log -pad="000" -MAX_PARALLEL=${MAX_PARALLEL:-5} -export IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-"Always"}" -export OPERATOR_INSTALL="${OPERATOR_INSTALL:-"yes"}" - -function run_test_parallel() { - test_names=("$@") - run_test_cmd="" - delete_ns_cmd="" - create_ns_cmd="" - for test_name in "${test_names[@]}"; do - ns=$(echo ${test_name} | tr '_' '-') - delete_ns_cmd+="(kubectl delete ns $ns --ignore-not-found --now --timeout=600s);" - create_ns_cmd+="kubectl create ns $ns;" - # TODO randomization, currently need to avoid same 'No such file or directory: '/tmp/testflows.x.x.x.x.log' - # sleep $(echo "scale=2; $((1 + $RANDOM % 100)) / 100" | bc -l) && - run_test_cmd+="( OPERATOR_NAMESPACE=${ns} TEST_NAMESPACE=${ns} python3 $CUR_DIR/../regression.py --only=/regression/e2e.test_operator/${test_name}* --no-color --native &>/tmp/${test_name}.log && date && echo ${test_name} PASS && kubectl delete ns $ns --timeout=600s) || (echo \"TEST ${test_name} FAILED EXIT_CODE=\$?\" && cat /tmp/${test_name}.log && exit 255);" - done - echo "${delete_ns_cmd}" | xargs -P 0 -r --verbose -d ";" -n 1 bash -ce - echo "${create_ns_cmd}" | xargs -P 0 -r --verbose -d ";" -n 1 bash -ce - set +e - echo "${run_test_cmd}" | xargs -P ${MAX_PARALLEL} -r --verbose -d ";" -n 1 bash -ce - if [[ "0" != "$?" ]]; then - echo "TEST FAILED LOOK TO LOGS ABOVE" - pkill -e -f "python.+regression" - exit 1 - fi - set -e -} - -is_crd_present=$(kubectl get crd -o name | grep clickhouse.altinity.com | wc -l) -delete_chi_cmd="" -if [[ "0" != "${is_crd_present}" && "0" != $(kubectl get chi --all-namespaces -o name | wc -l ) ]]; then - while read chi; do - delete_chi_cmd+="kubectl delete chi -n ${chi};" - done < <(kubectl get chi --all-namespaces -o custom-columns=name:.metadata.namespace,name:.metadata.name | tail -n +2) - echo "${delete_chi_cmd}" | xargs -P 0 -r --verbose -d ";" -n 1 bash -ce -fi -if [[ "0" != "${is_crd_present}" ]]; then - kubectl delete crd clickhouseinstallations.clickhouse.altinity.com clickhouseinstallationtemplates.clickhouse.altinity.com clickhouseoperatorconfigurations.clickhouse.altinity.com -fi -kubectl apply -f "${CUR_DIR}/../../deploy/operator/parts/crd.yaml" - -test_list=() -test_ids=(34 6 35 11 32 1 2 3 4 5 7 10 12 13 15 17 18 22 24 25 26 27 29 33 16 23) -for i in "${test_ids[@]}"; do - test_list+=( "test_${pad:${#i}}${i}" ) +export NO_WAIT=1 +"${CUR_DIR}/../../deploy/prometheus/create-prometheus.sh" +"${CUR_DIR}/../../deploy/minio/create-minio.sh" +ONLY="*" +for test_file in ${CUR_DIR}/test_*.py; do + name=$(basename "$test_file" .py | sed 's/^test_//') + run_cmd="python3 ./tests/regression.py --only=/regression/e2e?test_${name}/${ONLY} --trim-results on -o short --native --log ./tests/raw_${name}.log && " + run_cmd+="tfs --no-colors transform compact ./tests/raw_${name}.log ./tests/compact_${name}.log.txt && " + run_cmd+="tfs --no-colors transform nice ./tests/raw_${name}.log ./tests/nice_${name}.log.txt && " + run_cmd+="tfs --no-colors transform short ./tests/raw_${name}.log ./tests/short_${name}.log.txt && " + run_cmd+="bash -xec 'tfs --no-colors report results -a \"local run\" ./tests/raw_${name}.log - --confidential --copyright \"Altinity Inc.\" --logo ./tests/altinity.png | tfs --debug --no-colors document convert > ./tests/report_${name}.html'" + + run_tests+=( + "${run_cmd}" + ) done -MAX_PARALLEL=5 -run_test_parallel "${test_list[@]}" - -# allow parallel long test_XXX_X -test_list=("test_019" "test_014" "test_008" "test_020" "test_021" "test_028") -MAX_PARALLEL=5 -run_test_parallel "${test_list[@]}" - -# following test require sequenced execution (test_009 upgrade operator, test_030 delete crd) -test_list=("test_009" "test_030" "test_031") -MAX_PARALLEL=1 -run_test_parallel "${test_list[@]}" +printf "%s\n" "${run_tests[@]}" | xargs -d '\n' -P 2 -I {} bash -xec '{}' || test_result=$? +test_result=${test_result:-0} date -echo "ALL TESTS PASSED" - +if [[ "$test_result" == "0" ]]; then + echo "ALL TESTS PASSED" +else + echo "TESTS FAILED, SEE ./tests/*.log" +fi
diff --git a/tests/e2e/settings.py b/tests/e2e/settings.py index 8ffe798f9..c7ff64375 100644 --- a/tests/e2e/settings.py +++ b/tests/e2e/settings.py @@ -25,7 +25,7 @@ def get_docker_compose_path(): kubectl_cmd = ( "kubectl" if current().context.native else - f"docker-compose -f {get_docker_compose_path()[0]} exec -T runner kubectl" + f"docker compose -f {get_docker_compose_path()[0]} exec -T runner kubectl" ) kubectl_cmd = os.getenv("KUBECTL_CMD") if "KUBECTL_CMD" in os.environ else kubectl_cmd @@ -75,8 +75,8 @@ def get_docker_compose_path(): keeper_type = os.getenv("KEEPER_TYPE") if "KEEPER_TYPE" in os.environ else "zookeeper" # zookeeper | clickhouse_keeper prometheus_namespace = "prometheus" -prometheus_operator_version = "0.68" -prometheus_scrape_interval = 10 +prometheus_operator_version = "0.78.1" +prometheus_scrape_interval = 5 minio_version = "latest"
diff --git a/tests/e2e/steps.py b/tests/e2e/steps.py index 6a88fc008..3eaae7f4b 100644 --- a/tests/e2e/steps.py +++ b/tests/e2e/steps.py @@ -32,7 +32,7 @@ def create_test_namespace(self, force=False): """Create unique test namespace for test.""" if (self.cflags & PARALLEL) and not force: - self.context.test_namespace = self.name[self.name.find('test_0'):self.name.find('. 
')].replace("_", "-") + "-" + str(uuid.uuid1()) + self.context.test_namespace = self.name[self.name.find('test_0'):self.name.find('# ')].replace("_", "-") + "-" + str(uuid.uuid1()) self.context.operator_namespace = self.context.test_namespace util.create_namespace(self.context.test_namespace) util.install_operator_if_not_exist() @@ -77,7 +77,7 @@ def set_settings(self): self.context.kubectl_cmd = ( "kubectl" if current().context.native - else f"docker-compose -f {get_docker_compose_path()[0]} exec -T runner kubectl" + else f"docker compose -f {get_docker_compose_path()[0]} exec -T runner kubectl" ) self.context.kubectl_cmd = define("kubectl_cmd", os.getenv("KUBECTL_CMD") if "KUBECTL_CMD" in os.environ else self.context.kubectl_cmd) diff --git a/tests/e2e/test_backup_alerts.py b/tests/e2e/test_backup_alerts.py index 490bc5536..b3618cd52 100644 --- a/tests/e2e/test_backup_alerts.py +++ b/tests/e2e/test_backup_alerts.py @@ -1,3 +1,6 @@ +import os +os.environ["TEST_NAMESPACE"]="test-backup-alerts" + import json import random import time @@ -25,7 +28,7 @@ def get_minio_spec(): def exec_on_backup_container( backup_pod, cmd, - ns=settings.test_namespace, + ns, ok_to_fail=False, timeout=60, container="clickhouse-backup", @@ -60,12 +63,13 @@ def is_expected_backup_status(command_name, command_is_done, st, expected_status return False, command_is_done -def wait_backup_command_status(backup_pod, command_name, expected_status="success", err_status="error"): +def wait_backup_command_status(backup_pod, command_name, ns, expected_status="success", err_status="error"): command_is_done = False with Then(f'wait "{command_name}" with status "{expected_status}"'): while command_is_done is False: status_lines = exec_on_backup_container( - backup_pod, f'curl -sL "http://127.0.0.1:7171/backup/status"' + backup_pod, f'curl -sL "http://127.0.0.1:7171/backup/status"', + ns=ns ).splitlines() for line in status_lines: st = json.loads(line) @@ -161,7 +165,7 @@ def test_minio_setup(self, chi, minio_spec): @TestScenario -@Name("test_backup_is_success. 
Basic backup scenario") +@Name("test_backup_is_success# Basic backup scenario") def test_backup_is_success(self, chi, minio_spec): _, _, backup_pod, _ = alerts.random_pod_choice_for_callbacks(chi) backup_name = prepare_table_for_backup(backup_pod, chi) @@ -173,21 +177,26 @@ def test_backup_is_success(self, chi, minio_spec): "clickhouse_backup_successful_backups|clickhouse_backup_successful_creates", ns=self.context.test_namespace ) - list_before = exec_on_backup_container(backup_pod, "curl -sL http://127.0.0.1:7171/backup/list") + list_before = exec_on_backup_container(backup_pod, "curl -sL http://127.0.0.1:7171/backup/list", self.context.test_namespace) exec_on_backup_container( backup_pod, f'curl -X POST -sL "http://127.0.0.1:7171/backup/create?name={backup_name}"', + ns=self.context.test_namespace ) - wait_backup_command_status(backup_pod, f"create {backup_name}", expected_status="success") + wait_backup_command_status(backup_pod, f"create {backup_name}", expected_status="success", ns=self.context.test_namespace,) exec_on_backup_container( backup_pod, f'curl -X POST -sL "http://127.0.0.1:7171/backup/upload/{backup_name}"', + ns=self.context.test_namespace ) - wait_backup_command_status(backup_pod, f"upload {backup_name}", expected_status="success") + wait_backup_command_status(backup_pod, f"upload {backup_name}", expected_status="success", ns=self.context.test_namespace) with Then("list of backups shall changed"): - list_after = exec_on_backup_container(backup_pod, "curl -sL http://127.0.0.1:7171/backup/list") + list_after = exec_on_backup_container( + backup_pod, "curl -sL http://127.0.0.1:7171/backup/list", + ns=self.context.test_namespace + ) assert list_before != list_after, error("backup is not created") with Then("successful backup count shall increased"): @@ -202,13 +211,13 @@ def test_backup_is_success(self, chi, minio_spec): @TestScenario -@Name("test_backup_is_down. ClickHouseBackupDown and ClickHouseBackupRecentlyRestart alerts") +@Name("test_backup_is_down# ClickHouseBackupDown and ClickHouseBackupRecentlyRestart alerts") def test_backup_is_down(self, chi, minio_spec): reboot_pod, _, _, _ = alerts.random_pod_choice_for_callbacks(chi) def reboot_backup_container(): kubectl.launch( - f"exec -n {settings.test_namespace} {reboot_pod} -c clickhouse-backup -- kill 1", + f"exec -n {self.context.test_namespace} {reboot_pod} -c clickhouse-backup -- kill 1", ok_to_fail=True, ) @@ -257,7 +266,7 @@ def reboot_backup_container(): @TestScenario -@Name("test_backup_failed. 
Check ClickHouseBackupFailed alerts") +@Name("test_backup_failed# Check ClickHouseBackupFailed alerts") def test_backup_failed(self, chi, minio_spec): backup_pod, _, _, _ = alerts.random_pod_choice_for_callbacks(chi) backup_prefix = prepare_table_for_backup(backup_pod, chi) @@ -268,20 +277,20 @@ def create_fail_backup(): backup_name = backup_prefix + "-" + str(random.randint(1, 4096)) backup_dir = f"/var/lib/clickhouse/backup/{backup_name}/shadow/default/test_backup" kubectl.launch( - f"exec -n {settings.test_namespace} {backup_pod} -c clickhouse-backup -- bash -c 'mkdir -v -m 0400 -p {backup_dir}'", + f"exec -n {self.context.test_namespace} {backup_pod} -c clickhouse-backup -- bash -c 'mkdir -v -m 0400 -p {backup_dir}'", ) kubectl.launch( - f"exec -n {settings.test_namespace} {backup_pod} -c clickhouse-backup -- curl -X POST -sL http://127.0.0.1:7171/backup/create?name={backup_name}", + f"exec -n {self.context.test_namespace} {backup_pod} -c clickhouse-backup -- curl -X POST -sL http://127.0.0.1:7171/backup/create?name={backup_name}", ) - wait_backup_command_status(backup_pod, command_name=f"create {backup_name}", expected_status="error") + wait_backup_command_status(backup_pod, command_name=f"create {backup_name}", expected_status="error", ns=self.context.test_namespace) def create_success_backup(): backup_name = backup_prefix + "-" + str(random.randint(1, 4096)) kubectl.launch( - f"exec -n {settings.test_namespace} {backup_pod} -c clickhouse-backup -- curl -X POST -sL http://127.0.0.1:7171/backup/create?name={backup_name}", + f"exec -n {self.context.test_namespace} {backup_pod} -c clickhouse-backup -- curl -X POST -sL http://127.0.0.1:7171/backup/create?name={backup_name}", ) - wait_backup_command_status(backup_pod, command_name=f"create {backup_name}", expected_status="success") + wait_backup_command_status(backup_pod, command_name=f"create {backup_name}", expected_status="success", ns=self.context.test_namespace) with When("clickhouse-backup create failed"): fired = alerts.wait_alert_state( @@ -307,7 +316,7 @@ def create_success_backup(): @TestScenario -@Name("test_backup_duration. Check ClickHouseBackupTooShort and ClickHouseBackupTooLong alerts") +@Name("test_backup_duration# Check ClickHouseBackupTooShort and ClickHouseBackupTooLong alerts") def test_backup_duration(self, chi, minio_spec): short_pod, _, long_pod, _ = alerts.random_pod_choice_for_callbacks(chi) apply_fake_backup("prepare fake backup duration metric") @@ -374,7 +383,7 @@ def test_backup_duration(self, chi, minio_spec): @TestScenario -@Name("test_backup_size. Check ClickHouseBackupSizeChanged alerts") +@Name("test_backup_size# Check ClickHouseBackupSizeChanged alerts") def test_backup_size(self, chi, minio_spec): decrease_pod, _, increase_pod, _ = alerts.random_pod_choice_for_callbacks(chi) @@ -395,8 +404,9 @@ def test_backup_size(self, chi, minio_spec): exec_on_backup_container( backup_pod, f'curl -X POST -sL "http://127.0.0.1:7171/backup/create?name={backup_name}"', + ns=self.context.test_namespace ) - wait_backup_command_status(backup_pod, f"create {backup_name}", expected_status="success") + wait_backup_command_status(backup_pod, f"create {backup_name}", expected_status="success", ns=self.context.test_namespace) if decrease: clickhouse.query( chi["metadata"]["name"], @@ -425,7 +435,7 @@ def test_backup_size(self, chi, minio_spec): @TestScenario -@Name("test_backup_not_run. 
Check ClickhouseBackupDoesntRunTooLong alert") +@Name("test_backup_not_run# Check ClickhouseBackupDoesntRunTooLong alert") def test_backup_not_run(self, chi, minio_spec): not_run_pod, _, _, _ = alerts.random_pod_choice_for_callbacks(chi) apply_fake_backup("prepare fake backup for time metric") @@ -462,14 +472,16 @@ def test_backup_not_run(self, chi, minio_spec): exec_on_backup_container( not_run_pod, f'curl -X POST -sL "http://127.0.0.1:7171/backup/create?name={backup_name}"', + ns=self.context.test_namespace ) - wait_backup_command_status(not_run_pod, f"create {backup_name}", expected_status="success") + wait_backup_command_status(not_run_pod, f"create {backup_name}", expected_status="success", ns=self.context.test_namespace) exec_on_backup_container( not_run_pod, f'curl -X POST -sL "http://127.0.0.1:7171/backup/upload/{backup_name}"', + ns=self.context.test_namespace ) - wait_backup_command_status(not_run_pod, f"upload {backup_name}", expected_status="success") + wait_backup_command_status(not_run_pod, f"upload {backup_name}", expected_status="success", ns=self.context.test_namespace) with Then("check ClickhouseBackupDoesntRunTooLong gone away"): resolved = alerts.wait_alert_state( @@ -502,7 +514,7 @@ def test(self): minio_spec = get_minio_spec() with Module("backup_alerts"): - test_cases = [ + all_tests = [ test_backup_is_success, test_backup_is_down, test_backup_failed, @@ -510,5 +522,7 @@ def test(self): test_backup_size, test_backup_not_run, ] - for t in test_cases: + for t in all_tests: Scenario(test=t)(chi=chi, minio_spec=minio_spec) + + util.clean_namespace(delete_chi=True, delete_keeper=True, namespace=self.context.test_namespace) diff --git a/tests/e2e/test_clickhouse.py b/tests/e2e/test_clickhouse.py index 7ede862b1..181df4f24 100644 --- a/tests/e2e/test_clickhouse.py +++ b/tests/e2e/test_clickhouse.py @@ -1,17 +1,20 @@ +import os +os.environ["TEST_NAMESPACE"]="test-clickhouse" + import time import e2e.clickhouse as clickhouse import e2e.kubectl as kubectl import e2e.yaml_manifest as yaml_manifest -import e2e.settings as settings import e2e.util as util +import e2e.steps as steps from testflows.core import * from testflows.asserts import error @TestScenario -@Name("test_ch_001. Insert quorum") +@Name("test_ch_001: Insert quorum") def test_ch_001(self): util.require_keeper(keeper_type=self.context.keeper_type) quorum_template = "manifests/chit/tpl-clickhouse-stable.yaml" @@ -19,7 +22,7 @@ def test_ch_001(self): kubectl.launch( f"delete chit {chit_data['metadata']['name']}", - ns=settings.test_namespace, + ns=self.context.test_namespace, ok_to_fail=True, ) kubectl.create_and_check( @@ -32,7 +35,7 @@ def test_ch_001(self): ) chi = yaml_manifest.get_name(util.get_full_path("manifests/chi/test-ch-001-insert-quorum.yaml")) - chi_data = kubectl.get("chi", ns=settings.test_namespace, name=chi) + chi_data = kubectl.get("chi", ns=self.context.test_namespace, name=chi) util.wait_clickhouse_cluster_ready(chi_data) host0 = "chi-test-ch-001-insert-quorum-default-0-0" @@ -112,9 +115,7 @@ def test_ch_001(self): with When("Resume fetches for t2 at replica1"): clickhouse.query(chi, "system start fetches default.t2", host=host1) i = 0 - while ( - "2" - != clickhouse.query( + while ("2" != clickhouse.query( chi, "select active_replicas from system.replicas where database='default' and table='t1'", pod=host0, @@ -156,7 +157,7 @@ def test_ch_001(self): @TestScenario -@Name("test_ch_002. 
Row-level security") +@Name("test_ch_002: Row-level security") def test_ch_002(self): kubectl.create_and_check( "manifests/chi/test-ch-002-row-level.yaml", @@ -198,16 +199,23 @@ def test_ch_002(self): @TestFeature @Name("e2e.test_clickhouse") def test(self): - util.clean_namespace(delete_chi=False) + with Given("set settings"): + steps.set_settings() + with Given("I create shell"): + shell = steps.get_shell() + self.context.shell = shell + + util.clean_namespace(delete_chi=True, delete_keeper=True, namespace=self.context.test_namespace) + util.install_operator_if_not_exist() all_tests = [ test_ch_001, test_ch_002, ] - run_test = all_tests - # placeholder for selective test running - # run_test = [test_ch_002] + # all_tests = [test_ch_002] - for t in run_test: + for t in all_tests: Scenario(test=t)() + + util.clean_namespace(delete_chi=True, delete_keeper=True, namespace=self.context.test_namespace) diff --git a/tests/e2e/test_examples.py b/tests/e2e/test_examples.py index 1c8d07d19..839f11fd7 100644 --- a/tests/e2e/test_examples.py +++ b/tests/e2e/test_examples.py @@ -1,10 +1,14 @@ +import os +os.environ["TEST_NAMESPACE"]="test-examples" + from testflows.core import * import e2e.kubectl as kubectl import e2e.util as util +import e2e.steps as steps @TestScenario -@Name("test_examples01_1. Empty installation, creates 1 node") +@Name("test_examples01_1: Empty installation, creates 1 node") def test_examples01_1(self): kubectl.create_and_check( manifest="../../docs/chi-examples/01-simple-layout-01-1shard-1repl.yaml", @@ -19,7 +23,7 @@ def test_examples01_1(self): @TestScenario -@Name("test_examples01_2. 1 shard 2 replicas") +@Name("test_examples01_2: 1 shard 2 replicas") def test_examples01_2(self): kubectl.create_and_check( manifest="../../docs/chi-examples/01-simple-layout-02-1shard-2repl.yaml", @@ -34,7 +38,7 @@ def test_examples01_2(self): @TestScenario -@Name("test_examples02_1. Persistent volume mapping via defaults") +@Name("test_examples02_1: Persistent volume mapping via defaults") def test_examples02_1(self): kubectl.create_and_check( manifest="../../docs/chi-examples/03-persistent-volume-01-default-volume.yaml", @@ -49,13 +53,13 @@ def test_examples02_1(self): @TestScenario -@Name("test_examples02_2. 
Persistent volume mapping via podTemplate") +@Name("test_examples02_2: Persistent volume mapping via podTemplate") def test_examples02_2(self): kubectl.create_and_check( manifest="../../docs/chi-examples/03-persistent-volume-02-pod-template.yaml", check={ "pod_count": 1, - "pod_image": "clickhouse/clickhouse-server:23.8", + "pod_image": "clickhouse/clickhouse-server:24.8", "pod_volumes": { "/var/lib/clickhouse", "/var/log/clickhouse-server", @@ -67,7 +71,17 @@ def test_examples02_2(self): @TestFeature @Name("e2e.test_examples") def test(self): + with Given("set settings"): + steps.set_settings() + self.context.test_namespace = "test-examples" + self.context.operator_namespace = "test-examples" + with Given("I create shell"): + shell = steps.get_shell() + self.context.shell = shell + util.clean_namespace(delete_chi=False) + util.install_operator_if_not_exist() + examples = [ test_examples01_1, test_examples01_2, @@ -76,3 +90,5 @@ def test(self): ] for t in examples: Scenario(test=t)() + + util.clean_namespace(delete_chi=False) diff --git a/tests/e2e/test_keeper.py b/tests/e2e/test_keeper.py index ac31ea246..23401b23f 100644 --- a/tests/e2e/test_keeper.py +++ b/tests/e2e/test_keeper.py @@ -1,4 +1,5 @@ -import time +import os +os.environ["TEST_NAMESPACE"]="test-keeper" import e2e.clickhouse as clickhouse import e2e.kubectl as kubectl @@ -59,7 +60,7 @@ def insert_replicated_data(chi, pod_for_insert_data, create_tables, insert_table ) -def check_zk_root_znode(chi, keeper_type, pod_count, retry_count=15): +def check_zk_root_znode(chi, keeper_type, pod_count, ns, retry_count=15): for pod_num in range(pod_count): found = False for i in range(retry_count): @@ -82,7 +83,7 @@ def check_zk_root_znode(chi, keeper_type, pod_count, retry_count=15): out = kubectl.launch( f"exec {pod_prefix}-{pod_num} -- bash -ce '{keeper_cmd}'", - ns=settings.test_namespace, + ns=ns, ok_to_fail=True, ) found = False @@ -144,17 +145,17 @@ def rescale_zk_and_clickhouse( return chi -def delete_keeper_pvc(keeper_type): +def delete_keeper_pvc(keeper_type, ns): pvc_list = kubectl.get( kind="pvc", name="", label=f"-l app={keeper_type}", - ns=settings.test_namespace, + ns=ns, ok_to_fail=False, ) for pvc in pvc_list["items"]: if pvc["metadata"]["name"][-2:] != "-0": - kubectl.launch(f"delete pvc {pvc['metadata']['name']}", ns=settings.test_namespace) + kubectl.launch(f"delete pvc {pvc['metadata']['name']}", ns=ns) def start_stop_zk_and_clickhouse(chi_name, ch_stop, keeper_replica_count, keeper_type, keeper_manifest_1_node, @@ -167,6 +168,8 @@ def start_stop_zk_and_clickhouse(chi_name, ch_stop, keeper_replica_count, keeper keeper_manifest = f"../../deploy/zookeeper/zookeeper-manually/quick-start-persistent-volume/{keeper_manifest}" if keeper_type == "clickhouse-keeper": keeper_manifest = f"../../deploy/clickhouse-keeper/clickhouse-keeper-manually/{keeper_manifest}" + if keeper_type == "clickhouse-keeper_with_chk": + keeper_manifest = f"../../deploy/clickhouse-keeper/clickhouse-keeper-with-CHK-resource/{keeper_manifest}" if keeper_type == "zookeeper-operator": keeper_manifest = f"../../deploy/zookeeper/zookeeper-with-zookeeper-operator/{keeper_manifest}" @@ -209,8 +212,8 @@ def test_keeper_rescale_outline( """ with When("Clean exists ClickHouse Keeper and ZooKeeper"): - kubectl.delete_all_keeper(settings.test_namespace) - kubectl.delete_all_chi(settings.test_namespace) + kubectl.delete_all_keeper(self.context.test_namespace) + kubectl.delete_all_chi(self.context.test_namespace) with When("Install CH 1 node ZK 1 node"): chi = 
rescale_zk_and_clickhouse( @@ -223,7 +226,7 @@ def test_keeper_rescale_outline( ) util.wait_clickhouse_cluster_ready(chi) wait_keeper_ready(keeper_type=keeper_type, pod_count=1) - check_zk_root_znode(chi, keeper_type, pod_count=1) + check_zk_root_znode(chi, keeper_type, pod_count=1, ns=self.context.test_namespace) util.wait_clickhouse_no_readonly_replicas(chi) insert_replicated_data( chi, @@ -244,7 +247,7 @@ def test_keeper_rescale_outline( keeper_manifest_3_node=keeper_manifest_3_node, ) wait_keeper_ready(keeper_type=keeper_type, pod_count=3) - check_zk_root_znode(chi, keeper_type, pod_count=3) + check_zk_root_znode(chi, keeper_type, pod_count=3, ns=self.context.test_namespace) util.wait_clickhouse_cluster_ready(chi) util.wait_clickhouse_no_readonly_replicas(chi) @@ -264,9 +267,9 @@ def test_keeper_rescale_outline( keeper_manifest_3_node=keeper_manifest_3_node, ) wait_keeper_ready(keeper_type=keeper_type, pod_count=1) - check_zk_root_znode(chi, keeper_type, pod_count=1) + check_zk_root_znode(chi, keeper_type, pod_count=1, ns=self.context.test_namespace) if keeper_type == "zookeeper" and "scaleout-pvc" in keeper_manifest_1_node: - delete_keeper_pvc(keeper_type=keeper_type) + delete_keeper_pvc(keeper_type=keeper_type, ns=self.context.test_namespace) util.wait_clickhouse_cluster_ready(chi) util.wait_clickhouse_no_readonly_replicas(chi) @@ -285,7 +288,7 @@ def test_keeper_rescale_outline( keeper_manifest_1_node=keeper_manifest_1_node, keeper_manifest_3_node=keeper_manifest_3_node, ) - check_zk_root_znode(chi, keeper_type, pod_count=3) + check_zk_root_znode(chi, keeper_type, pod_count=3, ns=self.context.test_namespace) for keeper_replica_count in [1, 3]: with When("Stop CH + ZK"): @@ -308,7 +311,7 @@ def test_keeper_rescale_outline( ) with Then("check data in tables"): - check_zk_root_znode(chi, keeper_type, pod_count=3) + check_zk_root_znode(chi, keeper_type, pod_count=3, ns=self.context.test_namespace) util.wait_clickhouse_cluster_ready(chi) util.wait_clickhouse_no_readonly_replicas(chi) for table_name, exptected_rows in { @@ -331,7 +334,7 @@ def test_keeper_rescale_outline( @TestScenario -@Name("test_zookeeper_rescale. Check ZK scale-up / scale-down cases") +@Name("test_zookeeper_rescale# Check ZK scale-up / scale-down cases") def test_zookeeper_rescale(self): test_keeper_rescale_outline( keeper_type="zookeeper", @@ -342,7 +345,7 @@ def test_zookeeper_rescale(self): @TestScenario -@Name("test_clickhouse_keeper_rescale. Check KEEPER scale-up / scale-down cases") +@Name("test_clickhouse_keeper_rescale# Check KEEPER scale-up / scale-down cases") def test_clickhouse_keeper_rescale(self): test_keeper_rescale_outline( keeper_type="clickhouse-keeper", @@ -353,7 +356,7 @@ def test_clickhouse_keeper_rescale(self): @TestScenario -@Name("test_clickhouse_keeper_rescale_chk. Using ClickHouseKeeperInstallation. Check KEEPER scale-up / scale-down cases") +@Name("test_clickhouse_keeper_rescale_chk# Using ClickHouseKeeperInstallation. Check KEEPER scale-up / scale-down cases") @Requirements(RQ_SRS_026_ClickHouseOperator_CustomResource_Kind_ClickHouseKeeperInstallation("1.0")) def test_clickhouse_keeper_rescale_chk(self): test_keeper_rescale_outline( @@ -364,26 +367,26 @@ def test_clickhouse_keeper_rescale_chk(self): ) -@TestScenario -@Name("test_zookeeper_operator_rescale. 
Check Zookeeper OPERATOR scale-up / scale-down cases")
-def test_zookeeper_operator_rescale(self):
-    test_keeper_rescale_outline(
-        keeper_type="zookeeper-operator",
-        pod_for_insert_data="chi-test-cluster-for-zk-default-0-1-0",
-        keeper_manifest_1_node="zookeeper-operator-1-node.yaml",
-        keeper_manifest_3_node="zookeeper-operator-3-nodes.yaml",
-    )
+# @TestScenario
+# @Name("test_zookeeper_operator_rescale# Check Zookeeper OPERATOR scale-up / scale-down cases")
+# def test_zookeeper_operator_rescale(self):
+#     test_keeper_rescale_outline(
+#         keeper_type="zookeeper-operator",
+#         pod_for_insert_data="chi-test-cluster-for-zk-default-0-1-0",
+#         keeper_manifest_1_node="zookeeper-operator-1-node.yaml",
+#         keeper_manifest_3_node="zookeeper-operator-3-nodes.yaml",
+#     )


-@TestScenario
-@Name("test_zookeeper_pvc_scaleout_rescale. Check ZK+PVC scale-up / scale-down cases")
-def test_zookeeper_pvc_scaleout_rescale(self):
-    test_keeper_rescale_outline(
-        keeper_type="zookeeper",
-        pod_for_insert_data="chi-test-cluster-for-zk-default-0-1-0",
-        keeper_manifest_1_node="zookeeper-1-node-1GB-for-tests-only-scaleout-pvc.yaml",
-        keeper_manifest_3_node="zookeeper-3-nodes-1GB-for-tests-only-scaleout-pvc.yaml",
-    )
+# @TestScenario
+# @Name("test_zookeeper_pvc_scaleout_rescale# Check ZK+PVC scale-up / scale-down cases")
+# def test_zookeeper_pvc_scaleout_rescale(self):
+#     test_keeper_rescale_outline(
+#         keeper_type="zookeeper",
+#         pod_for_insert_data="chi-test-cluster-for-zk-default-0-1-0",
+#         keeper_manifest_1_node="zookeeper-1-node-1GB-for-tests-only-scaleout-pvc.yaml",
+#         keeper_manifest_3_node="zookeeper-3-nodes-1GB-for-tests-only-scaleout-pvc.yaml",
+#     )


 @TestOutline
@@ -394,8 +397,8 @@ def test_keeper_probes_outline(
     keeper_type="zookeeper",
     keeper_manifest_1_node="zookeeper-1-node-1GB-for-tests-only.yaml",
     keeper_manifest_3_node="zookeeper-3-nodes-1GB-for-tests-only.yaml",
 ):
     with When("Clean exists ClickHouse Keeper and ZooKeeper"):
-        kubectl.delete_all_chi(settings.test_namespace)
-        kubectl.delete_all_keeper(settings.test_namespace)
+        kubectl.delete_all_chi(self.context.test_namespace)
+        kubectl.delete_all_keeper(self.context.test_namespace)

     with Then("Install CH 2 node ZK 3 node"):
         chi = rescale_zk_and_clickhouse(
@@ -409,7 +412,7 @@ def test_keeper_probes_outline(
     )
     util.wait_clickhouse_cluster_ready(chi)
     wait_keeper_ready(keeper_type=keeper_type, pod_count=3)
-    check_zk_root_znode(chi, keeper_type, pod_count=3)
+    check_zk_root_znode(chi, keeper_type, pod_count=3, ns=self.context.test_namespace)
     util.wait_clickhouse_no_readonly_replicas(chi)

     with Then("Create keeper_bench table"):
@@ -430,7 +433,7 @@ def test_keeper_probes_outline(
                 max_parts_in_total=1000000;
             """,
         )
-    with Then("Insert data to keeper_bench for make zookeeper workload"):
+    with Then("Insert data to keeper_bench to create keeper workload"):
         pod_prefix = "chi-test-cluster-for-zk-default"
         rows = 100000
         for pod in ("0-0-0", "0-1-0"):
@@ -465,7 +468,7 @@ def test_keeper_probes_outline(

 @TestScenario
 @Name(
-    "test_zookeeper_probes_workload. Liveness + Readiness probes shall works fine "
+    "test_zookeeper_probes_workload# Liveness + Readiness probes shall works fine "
     "under workload in multi-datacenter installation"
 )
 def test_zookeeper_probes_workload(self):
@@ -476,39 +479,39 @@ def test_zookeeper_probes_workload(self):
     )

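Editor's note on the namespace plumbing in this file: every e2e module now pins its own TEST_NAMESPACE before the first e2e.* import and passes the namespace through self.context instead of the shared settings module, so suites no longer collide when several run side by side. A minimal sketch of the pattern, with the assumption (not verified here) that e2e.settings resolves TEST_NAMESPACE once, at import time; current_namespace is a hypothetical helper:

    import os

    # Assumption: e2e.settings reads TEST_NAMESPACE exactly once, at import
    # time, hence the env assignment before any e2e.* import in these files.
    os.environ["TEST_NAMESPACE"] = "test-keeper"

    def current_namespace(context=None) -> str:
        # Hypothetical helper mirroring the self.context.test_namespace usage
        # above: prefer the per-suite context, fall back to the environment.
        from_context = getattr(context, "test_namespace", None) if context else None
        return from_context or os.environ.get("TEST_NAMESPACE", "test")

-@TestScenario
-@Name(
-    "test_zookeeper_pvc_probes_workload. 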
Liveness + Readiness probes shall works fine " - "under workload in multi-datacenter installation" -) -def test_zookeeper_pvc_probes_workload(self): - test_keeper_probes_outline( - keeper_type="zookeeper", - keeper_manifest_1_node="zookeeper-1-node-1GB-for-tests-only-scaleout-pvc.yaml", - keeper_manifest_3_node="zookeeper-3-nodes-1GB-for-tests-only-scaleout-pvc.yaml", - ) - - -@TestScenario -@Name( - "test_zookeeper_operator_probes_workload. Liveness + Readiness probes shall works fine " - "under workload in multi-datacenter installation" -) -def test_zookeeper_operator_probes_workload(self): - test_keeper_probes_outline( - keeper_type="zookeeper-operator", - keeper_manifest_1_node="zookeeper-operator-1-node.yaml", - keeper_manifest_3_node="zookeeper-operator-3-nodes.yaml", - - # uncomment only if you know how to use it - # keeper_manifest_1_node='zookeeper-operator-1-node-with-custom-probes.yaml', - # keeper_manifest_3_node='zookeeper-operator-3-nodes-with-custom-probes.yaml', - ) +# @TestScenario +# @Name( +# "test_zookeeper_pvc_probes_workload# Liveness + Readiness probes shall works fine " +# "under workload in multi-datacenter installation" +# ) +# def test_zookeeper_pvc_probes_workload(self): +# test_keeper_probes_outline( +# keeper_type="zookeeper", +# keeper_manifest_1_node="zookeeper-1-node-1GB-for-tests-only-scaleout-pvc.yaml", +# keeper_manifest_3_node="zookeeper-3-nodes-1GB-for-tests-only-scaleout-pvc.yaml", +# ) + + +# @TestScenario +# @Name( +# "test_zookeeper_operator_probes_workload# Liveness + Readiness probes shall works fine " +# "under workload in multi-datacenter installation" +# ) +# def test_zookeeper_operator_probes_workload(self): +# test_keeper_probes_outline( +# keeper_type="zookeeper-operator", +# keeper_manifest_1_node="zookeeper-operator-1-node.yaml", +# keeper_manifest_3_node="zookeeper-operator-3-nodes.yaml", +# +# # uncomment only if you know how to use it +# # keeper_manifest_1_node='zookeeper-operator-1-node-with-custom-probes.yaml', +# # keeper_manifest_3_node='zookeeper-operator-3-nodes-with-custom-probes.yaml', +# ) @TestScenario @Name( - "test_clickhouse_keeper_probes_workload. Liveness + Readiness probes shall works fine " + "test_clickhouse_keeper_probes_workload# Liveness + Readiness probes shall works fine " "under workload in multi-datacenter installation" ) def test_clickhouse_keeper_probes_workload(self): @@ -521,7 +524,7 @@ def test_clickhouse_keeper_probes_workload(self): @TestScenario @Name( - "test_clickhouse_keeper_probes_workload_with_CHKI. 
Liveness + Readiness probes shall works fine "
+    "test_clickhouse_keeper_probes_workload_with_chk# Liveness + Readiness probes shall works fine "
     "under workload in multi-datacenter installation"
 )
 @Requirements(RQ_SRS_026_ClickHouseOperator_CustomResource_Kind_ClickHouseKeeperInstallation("1.0"))
@@ -538,27 +541,30 @@ def test_clickhouse_keeper_probes_workload_with_chk(self):
 def test(self):
     with Given("set settings"):
         set_settings()
-        self.context.test_namespace = "test"
-        self.context.operator_namespace = "test"
+        self.context.test_namespace = "test-keeper"
+        self.context.operator_namespace = "test-keeper"
     with Given("I create shell"):
         shell = get_shell()
         self.context.shell = shell
+
+    util.clean_namespace(delete_chi=True, delete_keeper=True)
+    util.install_operator_if_not_exist()
+
     all_tests = [
-        test_zookeeper_operator_rescale,
+        # test_zookeeper_operator_rescale,
+        # test_zookeeper_pvc_scaleout_rescale,
         test_clickhouse_keeper_rescale,
         test_clickhouse_keeper_rescale_chk,
-        test_zookeeper_pvc_scaleout_rescale,
         test_zookeeper_rescale,
+        # test_zookeeper_pvc_probes_workload,
+        # test_zookeeper_operator_probes_workload,
         test_zookeeper_probes_workload,
-        test_zookeeper_pvc_probes_workload,
-        test_zookeeper_operator_probes_workload,
         test_clickhouse_keeper_probes_workload,
         test_clickhouse_keeper_probes_workload_with_chk,
     ]
-
-    util.clean_namespace(delete_chi=True, delete_keeper=True)
-    util.install_operator_if_not_exist()
     for t in all_tests:
         Scenario(test=t)()
+
+    util.clean_namespace(delete_chi=True, delete_keeper=True)
diff --git a/tests/e2e/test_metrics_alerts.py b/tests/e2e/test_metrics_alerts.py
index cb31ad6da..236f6258f 100644
--- a/tests/e2e/test_metrics_alerts.py
+++ b/tests/e2e/test_metrics_alerts.py
@@ -1,3 +1,7 @@
+import json
+import os
+os.environ["TEST_NAMESPACE"]="test-metrics-alerts"
+
 import re
 import time
 import random
@@ -15,13 +19,13 @@

 @TestScenario
-@Name("test_prometheus_setup. Check clickhouse-operator/prometheus/alertmanager setup")
+@Name("test_prometheus_setup# Check clickhouse-operator/prometheus/alertmanager setup")
 def test_prometheus_setup(self, prometheus_operator_spec, clickhouse_operator_spec, chi):
     with Given("clickhouse-operator is installed"):
         assert (
             kubectl.get_count(
                 "pod",
-                ns=settings.operator_namespace,
+                ns=self.context.operator_namespace,
                 label="-l app=clickhouse-operator",
             )
             > 0
@@ -64,12 +68,12 @@ def test_prometheus_setup(self, prometheus_operator_spec, clickhouse_operator_sp

 @TestScenario
-@Name("test_metrics_exporter_down. Check ClickHouseMetricsExporterDown")
+@Name("test_metrics_exporter_down# Check ClickHouseMetricsExporterDown")
 def test_metrics_exporter_down(self, prometheus_operator_spec, clickhouse_operator_spec, chi):
     def reboot_metrics_exporter():
         clickhouse_operator_pod = clickhouse_operator_spec["items"][0]["metadata"]["name"]
         kubectl.launch(
-            f"exec -n {settings.operator_namespace} {clickhouse_operator_pod} -c metrics-exporter -- sh -c 'kill 1'",
+            f"exec -n {self.context.operator_namespace} {clickhouse_operator_pod} -c metrics-exporter -- sh -c 'kill 1'",
             ok_to_fail=True,
         )
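A note on the two restart styles in this file: reboot_metrics_exporter above still uses `kill 1`, while the restart_keeper callbacks further down switch to a ps/awk/xargs pipeline, presumably because in the keeper images the server process is not PID 1, so signalling PID 1 would only hit the entrypoint. A hedged, standalone sketch of that pattern (the helper name is hypothetical):

    import subprocess

    # Hypothetical standalone version of the restart_keeper callbacks in this
    # file: find every keeper process inside the pod and SIGTERM it, instead
    # of relying on the server being PID 1.
    KILL_KEEPER = "ps -ef | grep keeper | grep -v grep | awk '{print $2}' | xargs kill"

    def restart_keeper_pod(namespace: str, pod: str) -> None:
        subprocess.run(
            ["kubectl", "exec", "-n", namespace, pod, "--", "sh", "-c", KILL_KEEPER],
            check=False,  # mirrors ok_to_fail=True: the exec may die with the killed process
        )

@@ -89,7 +93,7 @@ def reboot_metrics_exporter():
 @TestScenario
-@Name("test_clickhouse_server_reboot. 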
Check ClickHouseServerDown, ClickHouseServerRestartRecently") +@Name("test_clickhouse_server_reboot# Check ClickHouseServerDown, ClickHouseServerRestartRecently") def test_clickhouse_server_reboot(self, prometheus_operator_spec, clickhouse_operator_spec, chi): random_idx = random.randint(0, 1) clickhouse_pod = chi["status"]["pods"][random_idx] @@ -149,7 +153,7 @@ def reboot_clickhouse_server(): @TestScenario -@Name("test_clickhouse_dns_errors. Check ClickHouseDNSErrors") +@Name("test_clickhouse_dns_errors# Check ClickHouseDNSErrors") def test_clickhouse_dns_errors(self, prometheus_operator_spec, clickhouse_operator_spec, chi): random_idx = random.randint(0, 1) clickhouse_pod = chi["status"]["pods"][random_idx] @@ -196,7 +200,7 @@ def rewrite_dns_on_clickhouse_server(write_new=True): @TestScenario -@Name("test_distributed_files_to_insert. Check ClickHouseDistributedFilesToInsertHigh") +@Name("test_distributed_files_to_insert# Check ClickHouseDistributedFilesToInsertHigh") def test_distributed_files_to_insert(self, prometheus_operator_spec, clickhouse_operator_spec, chi): ( delayed_pod, @@ -276,7 +280,7 @@ def test_distributed_files_to_insert(self, prometheus_operator_spec, clickhouse_ @TestScenario -@Name("test_distributed_connection_exceptions. Check ClickHouseDistributedConnectionExceptions") +@Name("test_distributed_connection_exceptions# Check ClickHouseDistributedConnectionExceptions") def test_distributed_connection_exceptions(self, prometheus_operator_spec, clickhouse_operator_spec, chi): ( delayed_pod, @@ -340,12 +344,14 @@ def reboot_clickhouse_and_distributed_exection(): "true", ns=self.context.test_namespace, ) + # why connection refused if /ping return ok ? ;( + time.sleep(5) clickhouse.drop_distributed_table_on_cluster(chi) @TestScenario @Name( - "test_insert_related_alerts. 
Check ClickHouseRejectedInsert, ClickHouseDelayedInsertThrottling, ClickHouseMaxPartCountForPartition, ClickHouseLowInsertedRowsPerQuery" + "test_insert_related_alerts# Check ClickHouseRejectedInsert, ClickHouseDelayedInsertThrottling, ClickHouseMaxPartCountForPartition, ClickHouseLowInsertedRowsPerQuery" ) def test_insert_related_alerts(self, prometheus_operator_spec, clickhouse_operator_spec, chi): clickhouse.create_table_on_cluster(chi) @@ -357,10 +363,16 @@ def test_insert_related_alerts(self, prometheus_operator_spec, clickhouse_operat ) = alerts.random_pod_choice_for_callbacks(chi) prometheus_scrape_interval = settings.prometheus_scrape_interval + chi_name = chi["metadata"]["name"] # default values in system.merge_tree_settings + settings_json = json.loads(clickhouse.query(chi_name, "SELECT name, value FROM system.merge_tree_settings WHERE name IN ('parts_to_delay_insert','parts_to_throw_insert') FORMAT JSONCompact")) parts_to_throw_insert = 300 parts_to_delay_insert = 150 - chi_name = chi["metadata"]["name"] + for row in settings_json["data"]: + if row[0] == "parts_to_throw_insert": + parts_to_throw_insert = row[1] + if row[0] == "parts_to_delay_insert": + parts_to_delay_insert = row[1] parts_limits = parts_to_delay_insert selected_svc = delayed_svc @@ -375,13 +387,13 @@ def insert_many_parts_to_clickhouse(): + min_block + f"INSERT INTO default.test(event_time, test) SELECT now(),number FROM system.numbers LIMIT {r};" ) - clickhouse.query(chi_name, sql, host=selected_svc, ns=self.context.test_namespace) + clickhouse.query(chi_name, sql, host=selected_svc, ns=self.context.test_namespace, timeout=600) sql = ( min_block + "INSERT INTO default.test(event_time, test) SELECT now(), number FROM system.numbers LIMIT 1;" ) - clickhouse.query_with_error(chi_name, sql, host=selected_svc, ns=self.context.test_namespace) + clickhouse.query_with_error(chi_name, sql, host=selected_svc, ns=self.context.test_namespace, timeout=300) with Then(f"wait prometheus_scrape_interval={prometheus_scrape_interval}*2 sec"): time.sleep(prometheus_scrape_interval * 2) @@ -454,6 +466,13 @@ def insert_many_parts_to_clickhouse(): ) assert resolved, error("can't check ClickHouseLowInsertedRowsPerQuery alert is gone away") + clickhouse.query( + chi_name, + "OPTIMIZE TABLE default.test FINAL;SELECT count() FROM system.parts WHERE active AND database='default' AND table='test'", + host=selected_svc, + ns=self.context.test_namespace, + ) + parts_limits = parts_to_throw_insert selected_svc = rejected_svc insert_many_parts_to_clickhouse() @@ -487,7 +506,7 @@ def insert_many_parts_to_clickhouse(): @TestScenario -@Name("test_longest_running_query. Check ClickHouseLongestRunningQuery") +@Name("test_longest_running_query# Check ClickHouseLongestRunningQuery") def test_longest_running_query(self, prometheus_operator_spec, clickhouse_operator_spec, chi): long_running_pod, long_running_svc, _, _ = alerts.random_pod_choice_for_callbacks(chi) # 600s trigger + 2*30s - double prometheus scraping interval @@ -517,7 +536,7 @@ def test_longest_running_query(self, prometheus_operator_spec, clickhouse_operat @TestScenario -@Name("test_query_preempted. Check ClickHouseQueryPreempted") +@Name("test_query_preempted# Check ClickHouseQueryPreempted") def test_query_preempted(self, prometheus_operator_spec, clickhouse_operator_spec, chi): priority_pod, priority_svc, _, _ = alerts.random_pod_choice_for_callbacks(chi) @@ -555,7 +574,7 @@ def run_queries_with_priority(): @TestScenario -@Name("test_read_only_replica. 
Check ClickHouseReadonlyReplica") +@Name("test_read_only_replica# Check ClickHouseReadonlyReplica") def test_read_only_replica(self, prometheus_operator_spec, clickhouse_operator_spec, chi): ( read_only_pod, @@ -573,8 +592,9 @@ def test_read_only_replica(self, prometheus_operator_spec, clickhouse_operator_s ) def restart_keeper(): + kill_cmd = "ps -ef | grep keeper | grep -v grep | awk -F'[ \\t]+' '{print \$2}' | xargs kill" kubectl.launch( - f'exec -n {self.context.test_namespace} {self.context.keeper_type}-0 -- sh -c "kill 1"', + f'exec -n {self.context.test_namespace} {self.context.keeper_type}-0 -- sh -c "{kill_cmd}"', ok_to_fail=True, ) clickhouse.query_with_error( @@ -641,7 +661,7 @@ def restart_keeper(): @TestScenario -@Name("test_replicas_max_absolute_delay. Check ClickHouseReplicasMaxAbsoluteDelay") +@Name("test_replicas_max_absolute_delay# Check ClickHouseReplicasMaxAbsoluteDelay") def test_replicas_max_absolute_delay(self, prometheus_operator_spec, clickhouse_operator_spec, chi): ( stop_replica_pod, @@ -702,7 +722,7 @@ def restart_clickhouse_and_insert_to_replicated_table(): @TestScenario -@Name("test_too_many_connections. Check ClickHouseTooManyConnections") +@Name("test_too_many_connections# Check ClickHouseTooManyConnections") def test_too_many_connections(self, prometheus_operator_spec, clickhouse_operator_spec, chi): ( too_many_connection_pod, @@ -724,12 +744,12 @@ def make_too_many_connection(): # HTTPConnection metric increase after full parsing of HTTP Request, we can't provide pause between CONNECT and QUERY running # long_cmd += f"nc -vv 127.0.0.1 {port} <( printf \"POST / HTTP/1.1\\r\\nHost: 127.0.0.1:8123\\r\\nContent-Length: 34\\r\\n\\r\\nTEST\\r\\nTEST\\r\\nTEST\\r\\nTEST\\r\\nTEST\");" long_cmd += ( - 'wget -qO- "http://127.0.0.1:8123?query=SELECT sleepEachRow(1),number,now() FROM numbers(30) SETTINGS function_sleep_max_microseconds_per_block=0";' + 'wget -qO- "http://127.0.0.1:8123?query=SELECT sleepEachRow(1),number,now() FROM numbers(30) SETTINGS function_sleep_max_microseconds_per_block=0, max_threads=1";' ) elif port == "9000": - long_cmd += 'clickhouse-client --send_logs_level trace --idle_connection_timeout 70 --receive_timeout 70 -q "SELECT sleepEachRow(1),number,now() FROM numbers(30) SETTINGS function_sleep_max_microseconds_per_block=0";' + long_cmd += 'clickhouse-client --send_logs_level trace --idle_connection_timeout 70 --receive_timeout 70 -q "SELECT sleepEachRow(1),number,now() FROM numbers(30) SETTINGS function_sleep_max_microseconds_per_block=0,max_threads=1";' # elif port == "3306": - # long_cmd += 'mysql -u default -h 127.0.0.1 -e "SELECT sleepEachRow(1),number, now() FROM numbers(30) SETTINGS function_sleep_max_microseconds_per_block=0";' + # long_cmd += 'mysql -u default -h 127.0.0.1 -e "SELECT sleepEachRow(1),number, now() FROM numbers(30) SETTINGS function_sleep_max_microseconds_per_block=0,max_threads=1";' else: long_cmd += f'printf "1\\n1" | nc -q 5 -i 30 -vv 127.0.0.1 {port};' @@ -766,7 +786,7 @@ def make_too_many_connection(): @TestScenario -@Name("test_too_much_running_queries. Check ClickHouseTooManyRunningQueries") +@Name("test_too_much_running_queries# Check ClickHouseTooManyRunningQueries") def test_too_much_running_queries(self, prometheus_operator_spec, clickhouse_operator_spec, chi): ( _, @@ -828,7 +848,7 @@ def make_too_many_queries(): @TestScenario -@Name("test_system_settings_changed. 
Check ClickHouseSystemSettingsChanged") +@Name("test_system_settings_changed# Check ClickHouseSystemSettingsChanged") def test_system_settings_changed(self, prometheus_operator_spec, clickhouse_operator_spec, chi): changed_pod, changed_svc, _, _ = alerts.random_pod_choice_for_callbacks(chi) @@ -889,7 +909,7 @@ def test_system_settings_changed(self, prometheus_operator_spec, clickhouse_oper @TestScenario -@Name("test_version_changed. Check ClickHouseVersionChanged") +@Name("test_version_changed# Check ClickHouseVersionChanged") def test_version_changed(self, prometheus_operator_spec, clickhouse_operator_spec, chi): changed_pod, changed_svc, _, _ = alerts.random_pod_choice_for_callbacks(chi) @@ -954,14 +974,15 @@ def test_version_changed(self, prometheus_operator_spec, clickhouse_operator_spe @TestScenario -@Name("test_zookeeper_hardware_exceptions. Check ClickHouseZooKeeperHardwareExceptions") +@Name("test_zookeeper_hardware_exceptions# Check ClickHouseZooKeeperHardwareExceptions") def test_zookeeper_hardware_exceptions(self, prometheus_operator_spec, clickhouse_operator_spec, chi): pod1, svc1, pod2, svc2 = alerts.random_pod_choice_for_callbacks(chi) chi_name = chi["metadata"]["name"] def restart_keeper(): + kill_cmd = "ps -ef | grep keeper | grep -v grep | awk -F'[ \\t]+' '{print \$2}' | xargs kill" kubectl.launch( - f'exec -n {self.context.test_namespace} {self.context.keeper_type}-0 -- sh -c "kill 1"', + f'exec -n {self.context.test_namespace} {self.context.keeper_type}-0 -- sh -c "{kill_cmd}"', ok_to_fail=True, ) clickhouse.query_with_error( @@ -1009,7 +1030,7 @@ def restart_keeper(): @TestScenario -@Name("test_distributed_sync_insertion_timeout. Check ClickHouseDistributedSyncInsertionTimeoutExceeded") +@Name("test_distributed_sync_insertion_timeout# Check ClickHouseDistributedSyncInsertionTimeoutExceeded") def test_distributed_sync_insertion_timeout(self, prometheus_operator_spec, clickhouse_operator_spec, chi): ( sync_pod, @@ -1059,7 +1080,7 @@ def insert_distributed_sync(): @TestScenario -@Name("test_detached_parts. Check ClickHouseDetachedParts") +@Name("test_detached_parts# Check ClickHouseDetachedParts") def test_detached_parts(self, prometheus_operator_spec, clickhouse_operator_spec, chi): clickhouse.create_table_on_cluster(chi) detached_pod, detached_svc, _, _ = alerts.random_pod_choice_for_callbacks(chi) @@ -1118,14 +1139,27 @@ def attach_all_parts(): @TestScenario -@Name("test_clickhouse_keeper_alerts. Check ClickHouseKeeperDown") -def test_clickhouse_keeper_alerts(self, prometheus_operator_spec, clickhouse_operator_spec, chi): +@Name("test_clickhouse_keeper_alerts# Check ClickHouseKeeperDown") +def test_clickhouse_keeper_alerts(self): + alerts.initialize( + chi_file="manifests/chi/test-cluster-for-alerts.yaml", + chi_template_file="manifests/chit/tpl-clickhouse-alerts.yaml", + chi_name="test-cluster-for-alerts", + keeper_type="clickhouse-keeper", + ) test_keeper_alerts_outline(keeper_type="clickhouse-keeper") @TestScenario -@Name("test_zookeeper_alerts. 
Check ZookeeperDown, ZookeeperRestartRecently")
+@Name("test_zookeeper_alerts# Check ZookeeperDown, ZookeeperRestartRecently")
-def test_zookeeper_alerts(self, prometheus_operator_spec, clickhouse_operator_spec, chi):
+def test_zookeeper_alerts(self):
+    alerts.initialize(
+        chi_file="manifests/chi/test-cluster-for-alerts.yaml",
+        chi_template_file="manifests/chit/tpl-clickhouse-alerts.yaml",
+        chi_name="test-cluster-for-alerts",
+        keeper_type="zookeeper",
+    )
+
     test_keeper_alerts_outline(keeper_type="zookeeper")
@@ -1145,8 +1179,9 @@ def test_keeper_alerts_outline(self, keeper_type):
     }

     def restart_keeper():
+        kill_cmd = "ps -ef | grep keeper | grep -v grep | awk -F'[ \\t]+' '{print \$2}' | xargs kill"
         kubectl.launch(
-            f'exec -n {self.context.test_namespace} {keeper_spec} -- sh -c "kill 1"',
+            f'exec -n {self.context.test_namespace} {keeper_spec} -- sh -c "{kill_cmd}"',
             ok_to_fail=True,
         )
@@ -1167,7 +1202,7 @@ def wait_when_keeper_up():
             "firing",
             True,
             labels={"pod_name": keeper_spec},
-            time_range="1m",
+            time_range="3m",
             sleep_time=settings.prometheus_scrape_interval,
             callback=restart_keeper,
         )
@@ -1194,7 +1229,7 @@ def wait_when_keeper_up():
             "firing",
             True,
             labels={"pod_name": keeper_spec},
-            time_range="30s",
+            time_range="3m",
         )
         assert fired, error(f"can't get {expected_alerts[keeper_type]['restart']} alert in firing state")
@@ -1217,35 +1252,16 @@ def wait_when_keeper_up():
 def test(self):
     with Given("I setup settings"):
         steps.set_settings()
+        self.context.test_namespace = "test-metrics-alerts"
+        self.context.operator_namespace = "test-metrics-alerts"
     with Given("I create shell"):
         shell = steps.get_shell()
         self.context.shell = shell
     util.clean_namespace(delete_chi=True)
     util.install_operator_if_not_exist()
-    (prometheus_operator_spec, prometheus_spec, alertmanager_spec, clickhouse_operator_spec, chi,) = alerts.initialize(
-        chi_file="manifests/chi/test-cluster-for-alerts.yaml",
-        chi_template_file="manifests/chit/tpl-clickhouse-alerts.yaml",
-        chi_name="test-cluster-for-alerts",
-        keeper_type="clickhouse-keeper",
-    )
-    Scenario(test=test_clickhouse_keeper_alerts)(
-        prometheus_operator_spec=prometheus_operator_spec,
-        clickhouse_operator_spec=clickhouse_operator_spec,
-        chi=chi,
-    )
-
-    (prometheus_operator_spec, prometheus_spec, alertmanager_spec, clickhouse_operator_spec, chi,) = alerts.initialize(
-        chi_file="manifests/chi/test-cluster-for-alerts.yaml",
-        chi_template_file="manifests/chit/tpl-clickhouse-alerts.yaml",
-        chi_name="test-cluster-for-alerts",
-        keeper_type="zookeeper",
-    )
-    Scenario(test=test_zookeeper_alerts)(
-        prometheus_operator_spec=prometheus_operator_spec,
-        clickhouse_operator_spec=clickhouse_operator_spec,
-        chi=chi,
-    )
+    Scenario(test=test_zookeeper_alerts)()
+    Scenario(test=test_clickhouse_keeper_alerts)()

     (prometheus_operator_spec, prometheus_spec, alertmanager_spec, clickhouse_operator_spec, chi,) = alerts.initialize(
         chi_file="manifests/chi/test-cluster-for-alerts.yaml",
@@ -1254,14 +1270,13 @@ def test(self):
         keeper_type=self.context.keeper_type,
     )

-    test_cases = [
+    all_tests = [
         test_prometheus_setup,
+        test_insert_related_alerts,
+        test_distributed_connection_exceptions,
         test_read_only_replica,
-        test_replicas_max_absolute_delay,
         test_metrics_exporter_down,
         test_clickhouse_dns_errors,
-        test_distributed_connection_exceptions,
-        test_insert_related_alerts,
         test_too_many_connections,
         test_too_much_running_queries,
         test_longest_running_query,
@@ -1272,10 +1287,13 @@ def test(self):
         test_distributed_files_to_insert,
         test_detached_parts,
         test_clickhouse_server_reboot,
+        test_replicas_max_absolute_delay,
     ]
-    for t in test_cases:
+    for t in all_tests:
         Scenario(test=t)(
             prometheus_operator_spec=prometheus_operator_spec,
             clickhouse_operator_spec=clickhouse_operator_spec,
             chi=chi,
         )
+
+    util.clean_namespace(delete_chi=True, delete_keeper=True)
diff --git a/tests/e2e/test_metrics_exporter.py b/tests/e2e/test_metrics_exporter.py
index 4e6f4c52c..1c99d0426 100644
--- a/tests/e2e/test_metrics_exporter.py
+++ b/tests/e2e/test_metrics_exporter.py
@@ -1,5 +1,6 @@
-import time
-import re
+import os
+os.environ["TEST_NAMESPACE"]="test-metrics-exporter"
+import re
 import json

 from e2e.steps import *
@@ -12,16 +13,16 @@

 @TestScenario
-@Name("Check metrics server setup and version")
+@Name("test_metrics_exporter_setup: Check metrics server setup and version")
 def test_metrics_exporter_setup(self):
     with Given("clickhouse-operator is installed"):
-        assert kubectl.get_count("pod", ns="--all-namespaces", label=util.operator_label) > 0, error()
+        assert kubectl.get_count("pod", ns=self.context.operator_namespace, label=util.operator_label) > 0, error()
         with Then(f"Set metrics-exporter version {settings.operator_version}"):
             util.set_metrics_exporter_version(settings.operator_version)

 @TestScenario
-@Name("Test basic metrics exporter functionality")
+@Name("test_metrics_exporter_chi: Test basic metrics exporter functionality")
 def test_metrics_exporter_chi(self):
     def check_monitoring_chi(operator_namespace, operator_pod, expect_result, max_retries=10):
         with Then(f"metrics-exporter /chi endpoint result should return {expect_result}"):
@@ -74,7 +75,7 @@ def check_monitoring_metrics(operator_namespace, operator_pod, expect_result, ma
         "true,true",
         ns=self.context.operator_namespace,
     )
-    assert kubectl.get_count("pod", ns="--all-namespaces", label=util.operator_label) > 0, error()
+    assert kubectl.get_count("pod", ns=self.context.operator_namespace, label=util.operator_label) > 0, error()

     out = kubectl.launch("get pods -l app=clickhouse-operator", ns=self.context.operator_namespace).splitlines()[1]
     operator_pod = re.split(r"[\t\r\n\s]+", out)[0]
@@ -95,7 +96,7 @@ def check_monitoring_metrics(operator_namespace, operator_pod, expect_result, ma
     )
     expected_chi = [
         {
-            "namespace": "test",
+            "namespace": self.context.test_namespace,
             "name": "test-017-multi-version",
             "labels": {"clickhouse.altinity.com/chi": "test-017-multi-version"},
             "annotations": {"clickhouse.altinity.com/email": "myname@mydomain.com, yourname@yourdoman.com"},
@@ -105,13 +106,13 @@ def check_monitoring_metrics(operator_namespace, operator_pod, expect_result, ma
             "hosts": [
                 {
                     "name": "0-0",
-                    "hostname": "chi-test-017-multi-version-default-0-0.test.svc.cluster.local",
+                    "hostname": f"chi-test-017-multi-version-default-0-0.{self.context.test_namespace}.svc.cluster.local",
                     "tcpPort": 9000,
                     "httpPort": 8123
                 },
                 {
                     "name": "1-0",
-                    "hostname": "chi-test-017-multi-version-default-1-0.test.svc.cluster.local",
+                    "hostname": f"chi-test-017-multi-version-default-1-0.{self.context.test_namespace}.svc.cluster.local",
                     "tcpPort": 9000,
                     "httpPort": 8123
                 }
@@ -170,8 +171,8 @@
 def test(self):
     with Given("set settings"):
         set_settings()
-        self.context.test_namespace = "test"
-        self.context.operator_namespace = "test"
+        self.context.test_namespace = "test-metrics-exporter"
+        self.context.operator_namespace = "test-metrics-exporter"
     with Given("I create shell"):
         shell = get_shell()
         self.context.shell = shell
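The expected_chi fixture above hard-codes how host FQDNs are derived from the test namespace. A tiny sketch of that naming rule; the helper name is mine, the pattern is taken from the strings in the fixture:

    def chi_host_fqdn(chi: str, cluster: str, shard: int, replica: int, namespace: str) -> str:
        # Pod hostnames follow the headless-service pattern seen in
        # expected_chi above; only the namespace segment varies per module.
        return f"chi-{chi}-{cluster}-{shard}-{replica}.{namespace}.svc.cluster.local"

    assert chi_host_fqdn("test-017-multi-version", "default", 0, 0, "test-metrics-exporter") \
        == "chi-test-017-multi-version-default-0-0.test-metrics-exporter.svc.cluster.local"

diff --git 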
a/tests/e2e/test_operator.py b/tests/e2e/test_operator.py index 402ed6f6a..f9d1d6de0 100644 --- a/tests/e2e/test_operator.py +++ b/tests/e2e/test_operator.py @@ -19,7 +19,7 @@ @TestScenario -@Name("test_001. 1 node") +@Name("test_001# 1 node") @Requirements(RQ_SRS_026_ClickHouseOperator_Create("1.0")) def test_001(self): create_shell_namespace_clickhouse_template() @@ -41,7 +41,7 @@ def test_001(self): @TestScenario -@Name("test_002. useTemplates for pod, volume templates, and distribution") +@Name("test_002# useTemplates for pod, volume templates, and distribution") @Requirements( RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_UseTemplates("1.0"), RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_UseTemplates_Name("1.0"), @@ -70,7 +70,7 @@ def test_002(self): @TestScenario -@Name("test_003. 4 nodes with custom layout definition") +@Name("test_003# 4 nodes with custom layout definition") @Requirements( RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Configuration_Clusters("1.0"), RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Configuration_Clusters_Cluster_Layout("1.0"), @@ -96,7 +96,7 @@ def test_003(self): @TestScenario -@Name("test_004. Compatibility test if old syntax with volumeClaimTemplate is still supported") +@Name("test_004# Compatibility test if old syntax with volumeClaimTemplate is still supported") @Requirements( RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Templates_VolumeClaimTemplates("1.0"), RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Templates_VolumeClaimTemplates_Name("1.0"), @@ -120,7 +120,7 @@ def test_004(self): @TestScenario -@Name("test_005. Test manifest created by ACM") +@Name("test_005# Test manifest created by ACM") @Requirements(RQ_SRS_026_ClickHouseOperator_ACM("1.0")) def test_005(self): create_shell_namespace_clickhouse_template() @@ -141,13 +141,13 @@ def test_005(self): @TestScenario -@Name("test_006. Test clickhouse version upgrade from one version to another using podTemplate change") +@Name("test_006# Test clickhouse version upgrade from one version to another using podTemplate change") @Requirements(RQ_SRS_026_ClickHouseOperator_Managing_VersionUpgrades("1.0")) def test_006(self): create_shell_namespace_clickhouse_template() - old_version = "clickhouse/clickhouse-server:23.8" - new_version = "clickhouse/clickhouse-server:24.3" + old_version = "clickhouse/clickhouse-server:24.3" + new_version = "clickhouse/clickhouse-server:24.8" with Then("Create initial position"): kubectl.create_and_check( manifest="manifests/chi/test-006-ch-upgrade-1.yaml", @@ -180,7 +180,7 @@ def test_006(self): @TestScenario -@Name("test_007. Test template with custom clickhouse ports") +@Name("test_007# Test template with custom clickhouse ports") @Requirements( RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Templates_HostTemplates_Spec_InterServerHttpPort("1.0"), RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Templates_HostTemplates_Spec_TcpPort("1.0"), @@ -367,7 +367,7 @@ def check_remote_servers(self, chi, shards, trigger_event, shell=None, cluster=" @TestScenario -@Name("test_008_1. Test operator restart") +@Name("test_008_1# Test operator restart") @Requirements(RQ_SRS_026_ClickHouseOperator_Managing_RestartingOperator("1.0")) def test_008_1(self): create_shell_namespace_clickhouse_template() @@ -383,7 +383,7 @@ def test_008_1(self): @TestScenario -@Name("test_008_2. 
Test operator restart") +@Name("test_008_2# Test operator restart") def test_008_2(self): create_shell_namespace_clickhouse_template() @@ -398,7 +398,7 @@ def test_008_2(self): @TestScenario -@Name("test_008_3. Test operator restart in the middle of reconcile") +@Name("test_008_3# Test operator restart in the middle of reconcile") def test_008_3(self): create_shell_namespace_clickhouse_template() @@ -565,7 +565,7 @@ def test_operator_upgrade(self, manifest, service, version_from, version_to=None @TestScenario -@Name("test_009_1. Test operator upgrade") +@Name("test_009_1# Test operator upgrade") @Requirements(RQ_SRS_026_ClickHouseOperator_Managing_UpgradingOperator("1.0")) @Tags("NO_PARALLEL") def test_009_1(self, version_from="0.23.7", version_to=None): @@ -582,7 +582,7 @@ def test_009_1(self, version_from="0.23.7", version_to=None): @TestScenario -@Name("test_009_2. Test operator upgrade") +@Name("test_009_2# Test operator upgrade") @Tags("NO_PARALLEL") def test_009_2(self, version_from="0.23.7", version_to=None): if version_to is None: @@ -598,7 +598,7 @@ def test_009_2(self, version_from="0.23.7", version_to=None): @TestScenario -@Name("test_010. Test zookeeper initialization") +@Name("test_010# Test zookeeper initialization") @Requirements(RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Configuration_ZooKeeper("1.0")) def test_010(self): create_shell_namespace_clickhouse_template() @@ -632,7 +632,7 @@ def get_user_xml_from_configmap(chi, user): @TestScenario -@Name("test_011_1. Test user security and network isolation") +@Name("test_011_1# Test user security and network isolation") @Requirements(RQ_SRS_026_ClickHouseOperator_DefaultUsers("1.0")) def test_011_1(self): create_shell_namespace_clickhouse_template() @@ -806,7 +806,7 @@ def test_default_user(): @TestScenario -@Name("test_011_2. Test default user security") +@Name("test_011_2# Test default user security") @Requirements(RQ_SRS_026_ClickHouseOperator_DefaultUsers("1.0")) def test_011_2(self): create_shell_namespace_clickhouse_template() @@ -856,7 +856,7 @@ def test_011_2(self): @TestScenario -@Name("test_011_3. Test k8s secrets usage") +@Name("test_011_3# Test k8s secrets usage") @Requirements(RQ_SRS_026_ClickHouseOperator_Secrets("1.0")) def test_011_3(self): create_shell_namespace_clickhouse_template() @@ -943,7 +943,7 @@ def test_011_3(self): @TestScenario -@Name("test_012. Test service templates") +@Name("test_012# Test service templates") @Requirements( RQ_SRS_026_ClickHouseOperator_ServiceTemplates("1.0"), RQ_SRS_026_ClickHouseOperator_ServiceTemplates_NameGeneration("1.0"), @@ -1014,7 +1014,7 @@ def test_012(self): RQ_SRS_026_ClickHouseOperator_Managing_ClusterScaling_AddingShards("1.0"), RQ_SRS_026_ClickHouseOperator_Managing_ClusterScaling_SchemaPropagation("1.0"), ) -@Name("test_013_1. Automatic schema propagation for shards") +@Name("test_013_1# Automatic schema propagation for shards") def test_013_1(self): """Check clickhouse operator supports automatic schema propagation for shards.""" create_shell_namespace_clickhouse_template() @@ -1294,7 +1294,7 @@ def wait_for_cluster(chi, cluster, num_shards, num_replicas=0, pwd="", force_wai @TestScenario -@Name("test_014_0. 
Test that schema is correctly propagated on replicas") +@Name("test_014_0# Test that schema is correctly propagated on replicas") @Requirements( RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Configuration_Clusters_Cluster_ZooKeeper("1.0"), RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Configuration_Clusters("1.0"), @@ -1606,7 +1606,7 @@ def check_schema_propagation(replicas): @TestScenario -@Name("test_014_1. Test replication under different configuration scenarios") +@Name("test_014_1# Test replication under different configuration scenarios") def test_014_1(self): create_shell_namespace_clickhouse_template() @@ -1696,7 +1696,7 @@ def check_data_is_replicated(replicas, v): @TestScenario -@Name("test_015. Test circular replication with hostNetwork") +@Name("test_015# Test circular replication with hostNetwork") @Requirements(RQ_SRS_026_ClickHouseOperator_Deployments_CircularReplication("1.0")) def test_015(self): create_shell_namespace_clickhouse_template() @@ -1736,7 +1736,7 @@ def test_015(self): "test-015-host-network", host="chi-test-015-host-network-default-0-0", port="10000", - sql="SELECT count() FROM cluster('all-sharded', system.one) settings receive_timeout=10", + sql="SELECT count() FROM cluster('all-sharded', system.one) SETTINGS receive_timeout=10", ) note(f"cluster out:\n{out}") print(f"out: {out}") @@ -1747,7 +1747,7 @@ def test_015(self): @TestScenario -@Name("test_016. Test advanced settings options") +@Name("test_016# Test advanced settings options") @Requirements( RQ_SRS_026_ClickHouseOperator_ConfigurationFileControl_EmbeddedXML("1.0"), RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Configuration_Clusters("1.0"), @@ -1920,7 +1920,7 @@ def test_016(self): @TestScenario -@Name("test_017. Test deployment of multiple versions in a cluster") +@Name("test_017# Test deployment of multiple versions in a cluster") @Requirements(RQ_SRS_026_ClickHouseOperator_Deployments_DifferentClickHouseVersionsOnReplicasAndShards("1.0")) def test_017(self): create_shell_namespace_clickhouse_template() @@ -1961,7 +1961,7 @@ def test_017(self): @TestScenario -@Name("test_018. Test that server settings are applied before StatefulSet is started") +@Name("test_018# Test that server settings are applied before StatefulSet is started") # Obsolete, covered by test_016 def test_018(self): create_shell_namespace_clickhouse_template() @@ -2180,7 +2180,7 @@ def test_019(self, step=1): @TestScenario -@Name("test_019_1. Test that volume is correctly retained and can be re-attached. Provisioner: StatefulSet") +@Name("test_019_1# Test that volume is correctly retained and can be re-attached. Provisioner: StatefulSet") @Requirements(RQ_SRS_026_ClickHouseOperator_RetainingVolumeClaimTemplates("1.0")) def test_019_1(self): create_shell_namespace_clickhouse_template() @@ -2189,7 +2189,7 @@ def test_019_1(self): @TestScenario -@Name("test_019_2. Test that volume is correctly retained and can be re-attached. Provisioner: Operator") +@Name("test_019_2# Test that volume is correctly retained and can be re-attached. Provisioner: Operator") @Requirements(RQ_SRS_026_ClickHouseOperator_RetainingVolumeClaimTemplates("1.0")) def test_019_2(self): create_shell_namespace_clickhouse_template() @@ -2242,7 +2242,7 @@ def test_020(self, step=1): @TestScenario -@Name("test_020_1. 
Test multi-volume configuration, step=1") +@Name("test_020_1# Test multi-volume configuration, step=1") @Requirements(RQ_SRS_026_ClickHouseOperator_Deployments_MultipleStorageVolumes("1.0")) def test_020_1(self): create_shell_namespace_clickhouse_template() @@ -2251,7 +2251,7 @@ def test_020_1(self): @TestScenario -@Name("test_020_2. Test multi-volume configuration, step=2") +@Name("test_020_2# Test multi-volume configuration, step=2") @Requirements(RQ_SRS_026_ClickHouseOperator_Deployments_MultipleStorageVolumes("1.0")) def test_020_2(self): create_shell_namespace_clickhouse_template() @@ -2471,7 +2471,7 @@ def test_021(self, step=1): @TestScenario -@Name("test_021_1. Test rescaling storage. Provisioner: StatefulSet") +@Name("test_021_1# Test rescaling storage. Provisioner: StatefulSet") @Requirements(RQ_SRS_026_ClickHouseOperator_StorageProvisioning("1.0")) def test_021_1(self): create_shell_namespace_clickhouse_template() @@ -2480,7 +2480,7 @@ def test_021_1(self): @TestScenario -@Name("test_021_2. Test rescaling storage. Provisioner: Operator") +@Name("test_021_2# Test rescaling storage. Provisioner: Operator") @Requirements(RQ_SRS_026_ClickHouseOperator_StorageProvisioning("1.0")) def test_021_2(self): create_shell_namespace_clickhouse_template() @@ -2489,7 +2489,7 @@ def test_021_2(self): @TestScenario -@Name("test_022. Test that chi with broken image can be deleted") +@Name("test_022# Test that chi with broken image can be deleted") @Requirements(RQ_SRS_026_ClickHouseOperator_DeleteBroken("1.0")) def test_022(self): create_shell_namespace_clickhouse_template() @@ -2520,7 +2520,7 @@ def test_022(self): @TestScenario -@Name("test_023. Test auto templates") +@Name("test_023# Test auto templates") @Requirements(RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Templating("1.0")) def test_023(self): create_shell_namespace_clickhouse_template() @@ -2588,7 +2588,7 @@ def test_023(self): @TestScenario -@Name("test_024. Test annotations for various template types") +@Name("test_024# Test annotations for various template types") @Requirements(RQ_SRS_026_ClickHouseOperator_AnnotationsInTemplates("1.0")) def test_024(self): create_shell_namespace_clickhouse_template() @@ -2689,7 +2689,7 @@ def check_annotations(annotation, value, allow_to_fail_for_pvc=False): @TestScenario -@Name("test_025. Test that service is available during re-scaling, upgrades etc.") +@Name("test_025# Test that service is available during re-scaling, upgrades etc.") @Requirements(RQ_SRS_026_ClickHouseOperator_Managing_ClusterScaling_AddingReplicas("1.0")) def test_025(self): create_shell_namespace_clickhouse_template() @@ -2807,7 +2807,7 @@ def test_025(self): @TestScenario -@Name("test_026. Test mixed single and multi-volume configuration in one cluster") +@Name("test_026# Test mixed single and multi-volume configuration in one cluster") @Requirements(RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Configuration_Clusters_Cluster_Layout("1.0")) def test_026(self): create_shell_namespace_clickhouse_template() @@ -2887,7 +2887,7 @@ def test_026(self): @TestScenario -@Name("test_027. Test troubleshooting mode") +@Name("test_027# Test troubleshooting mode") @Requirements(RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Troubleshoot("1.0")) def test_027(self): # TODO: Add a case for a custom endpoint @@ -2939,7 +2939,7 @@ def test_027(self): @TestScenario -@Name("test_028. 
Test restart scenarios") +@Name("test_028# Test restart scenarios") @Requirements(RQ_SRS_026_ClickHouseOperator_Managing_RestartingOperator("1.0")) def test_028(self): create_shell_namespace_clickhouse_template() @@ -2994,14 +2994,16 @@ def test_028(self): sql, pod="chi-test-028-replication-default-0-0-0", host="chi-test-028-replication-default-0-0", - advanced_params="--connect_timeout=1 --send_timeout=10 --receive_timeout=10", + timeout=10, + advanced_params="--connect_timeout=1 --send_timeout=10", ) ch2 = clickhouse.query_with_error( chi, sql, pod="chi-test-028-replication-default-1-0-0", host="chi-test-028-replication-default-1-0", - advanced_params="--connect_timeout=1 --send_timeout=10 --receive_timeout=10", + timeout=10, + advanced_params="--connect_timeout=1 --send_timeout=10", ) if "error" in ch1 or "Exception" in ch1 or ch2.endswith("1"): @@ -3075,7 +3077,7 @@ def test_028(self): @TestScenario -@Name("test_029. Test different distribution settings") +@Name("test_029# Test different distribution settings") @Requirements( RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Templates_PodTemplates_podDistribution("1.0"), RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Templates_PodTemplates_podDistribution_Type("1.0"), @@ -3119,7 +3121,7 @@ def test_029(self): @TestScenario -@Name("test_030. Test CRD deletion") +@Name("test_030# Test CRD deletion") @Tags("NO_PARALLEL") def test_030(self): create_shell_namespace_clickhouse_template() @@ -3187,7 +3189,7 @@ def test_030(self): @TestScenario -@Name("test_031. Test excludeFromPropagationAnnotations work") +@Name("test_031# Test excludeFromPropagationAnnotations work") def test_031(self): create_shell_namespace_clickhouse_template() @@ -3333,7 +3335,7 @@ def run_insert_query(self, host, user, password, query, trigger_event, shell=Non @TestScenario -@Name("test_032. Test rolling update logic") +@Name("test_032# Test rolling update logic") # @Tags("NO_PARALLEL") def test_032(self): """Test rolling update logic.""" @@ -3441,7 +3443,7 @@ def test_032(self): @TestScenario @Requirements(RQ_SRS_026_ClickHouseOperator_EnableHttps("1.0")) -@Name("test_034. Check HTTPS support for health check") +@Name("test_034# Check HTTPS support for health check") def test_034(self): """Check ClickHouse-Operator HTTPS support by switching configuration to HTTPS using the chopconf file and creating a ClickHouse-Installation with HTTPS enabled and confirming the secure connectivity between them by @@ -3599,7 +3601,7 @@ def test_034(self): @TestScenario @Requirements(RQ_SRS_026_ClickHouseOperator_Managing_ReprovisioningVolume("1.0")) -@Name("test_036. Check operator volume re-provisioning") +@Name("test_036# Check operator volume re-provisioning") def test_036(self): """Check clickhouse operator recreates volumes and schema if volume is broken.""" create_shell_namespace_clickhouse_template() @@ -3788,7 +3790,7 @@ def check_data_is_recovered(reconcile_task_id): @TestScenario @Requirements(RQ_SRS_026_ClickHouseOperator_Managing_StorageManagementSwitch("1.0")) -@Name("test_037. StorageManagement switch") +@Name("test_037# StorageManagement switch") def test_037(self): """Check clickhouse-operator supports switching storageManagement config option from default (StatefulSet) to Operator""" @@ -3894,7 +3896,7 @@ def test_037(self): @TestCheck -@Name("test_039. 
Inter-cluster communications with secret")
+@Name("test_039# Inter-cluster communications with secret")
 def test_039(self, step=0, delete_chi=0):
     """Check clickhouse-operator support inter-cluster communications with secrets."""
     cluster = "default"
@@ -3967,7 +3969,7 @@ def test_039(self, step=0, delete_chi=0):

 @TestScenario
 @Requirements(RQ_SRS_026_ClickHouseOperator_InterClusterCommunicationWithSecret("1.0"))
-@Name("test_039_0. Inter-cluster communications with no secret defined")
+@Name("test_039_0# Inter-cluster communications with no secret defined")
 def test_039_0(self):
     create_shell_namespace_clickhouse_template()
@@ -3976,7 +3978,7 @@ def test_039_0(self):

 @TestScenario
 @Requirements(RQ_SRS_026_ClickHouseOperator_InterClusterCommunicationWithSecret("1.0"))
-@Name("test_039_1. Inter-cluster communications with 'auto' secret")
+@Name("test_039_1# Inter-cluster communications with 'auto' secret")
 def test_039_1(self):
     """Check clickhouse-operator support inter-cluster communications with 'auto' secret."""
     create_shell_namespace_clickhouse_template()
@@ -3986,7 +3988,7 @@ def test_039_1(self):

 @TestScenario
 @Requirements(RQ_SRS_026_ClickHouseOperator_InterClusterCommunicationWithSecret("1.0"))
-@Name("test_039_2. Inter-cluster communications with plan text secret")
+@Name("test_039_2# Inter-cluster communications with plain text secret")
 def test_039_2(self):
     """Check clickhouse-operator support inter-cluster communications with plan text secret."""
     create_shell_namespace_clickhouse_template()
@@ -3996,7 +3998,7 @@ def test_039_2(self):

 @TestScenario
 @Requirements(RQ_SRS_026_ClickHouseOperator_InterClusterCommunicationWithSecret("1.0"))
-@Name("test_039_3. Inter-cluster communications with k8s secret")
+@Name("test_039_3# Inter-cluster communications with k8s secret")
 def test_039_3(self):
     """Check clickhouse-operator support inter-cluster communications with k8s secret."""
     create_shell_namespace_clickhouse_template()
@@ -4006,7 +4008,7 @@ def test_039_3(self):

 @TestScenario
 @Requirements(RQ_SRS_026_ClickHouseOperator_InterClusterCommunicationWithSecret("1.0"))
-@Name("test_039_4. Inter-cluster communications over HTTPS")
+@Name("test_039_4# Inter-cluster communications over HTTPS")
 def test_039_4(self):
     """Check clickhouse-operator support inter-cluster communications over HTTPS."""
     create_shell_namespace_clickhouse_template()
@@ -4015,7 +4017,7 @@ def test_039_4(self):

 @TestScenario
-@Name("test_040. Inject a startup probe using an auto template")
+@Name("test_040# Inject a startup probe using an auto template")
 def test_040(self):
     create_shell_namespace_clickhouse_template()
@@ -4053,7 +4055,7 @@ def test_040(self):

 @TestScenario
-@Name("test_041. Secure zookeeper")
+@Name("test_041# Secure zookeeper")
 def test_041(self):
     """Check clickhouse operator support secure zookeeper."""
@@ -4117,7 +4119,7 @@ def test_041(self):

 @TestScenario
-@Name("test_042. Test configuration rollback")
+@Name("test_042# Test configuration rollback")
 def test_042(self):
     create_shell_namespace_clickhouse_template()
     with Given("I change operator statefullSet timeout"):
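The five test_039_* scenarios above differ only in how the cluster secret is declared in the CHI. For orientation, a hedged sketch of the three non-TLS declarations as Python dicts standing in for the YAML; the field names (auto / value / valueFrom.secretKeyRef) are my recollection of the operator CRD and should be verified, and the literal values are invented:

    # Hedged sketch only: cluster "secret" stanzas matching test_039_1..test_039_3.
    auto_secret = {"secret": {"auto": "true"}}             # operator generates the shared secret
    plain_secret = {"secret": {"value": "shared-secret"}}  # plain-text value kept in the CHI
    k8s_secret = {                                         # value pulled from a k8s Secret
        "secret": {"valueFrom": {"secretKeyRef": {"name": "inter-cluster-secret", "key": "secret"}}}
    }

@@ -4214,7 +4216,7 @@ def test_042(self):
 @TestCheck
-@Name("test_043. 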
Logs container customizing") +@Name("test_043# Logs container customizing") def test_043(self, manifest): """Check that clickhouse-operator support logs container customizing.""" @@ -4255,7 +4257,7 @@ def test_043(self, manifest): @TestScenario @Requirements(RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Defaults_Templates_logVolumeClaimTemplate("1.0")) -@Name("test_043_0. Logs container customizing using PodTemplate") +@Name("test_043_0# Logs container customizing using PodTemplate") def test_043_0(self): """Check that clickhouse-operator support manual logs container customizing.""" create_shell_namespace_clickhouse_template() @@ -4265,7 +4267,7 @@ def test_043_0(self): @TestScenario @Requirements(RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Defaults_Templates_logVolumeClaimTemplate("1.0")) -@Name("test_043_1. Default clickhouse-log container") +@Name("test_043_1# Default clickhouse-log container") def test_043_1(self): """Check that clickhouse-operator sets up default logs container if it is not specified in Pod.""" create_shell_namespace_clickhouse_template() @@ -4276,7 +4278,7 @@ def test_043_1(self): @TestScenario @Requirements(RQ_SRS_026_ClickHouseOperator_ReconcilingCycle("1.0"), RQ_SRS_026_ClickHouseOperator_Managing_ClusterScaling_SchemaPropagation("1.0")) -@Name("test_044. Schema and data propagation with slow replica") +@Name("test_044# Schema and data propagation with slow replica") def test_044(self): """Check that schema and data can be propagated on other replica if replica start takes a lot of time.""" create_shell_namespace_clickhouse_template() @@ -4347,7 +4349,7 @@ def test_044(self): @TestCheck -@Name("test_045. Restart operator without waiting for queries to finish") +@Name("test_045# Restart operator without waiting for queries to finish") def test_045(self, manifest): """Check that operator support does not wait for the query to finish before operator commences restart.""" @@ -4383,7 +4385,7 @@ def test_045(self, manifest): @TestScenario @Requirements(RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Reconciling_Policy("1.0")) -@Name("test_045_1. Reconcile wait queries property specified by CHI") +@Name("test_045_1# Reconcile wait queries property specified by CHI") def test_045_1(self): """Check that operator supports spec.reconciling.policy property in CHI that forces the operator not to wait for the queries to finish before restart.""" @@ -4395,7 +4397,7 @@ def test_045_1(self): @TestScenario @Requirements(RQ_SRS_026_ClickHouseOperator_Configuration_Spec_ReconcileWaitQueries("1.0")) -@Name("test_045_2. Reconcile wait queries property specified by clickhouse-operator config") +@Name("test_045_2# Reconcile wait queries property specified by clickhouse-operator config") def test_045_2(self): """Check that operator supports spec.reconcile.host.wait.queries property in clickhouse-operator config that forces the operator not to wait for the queries to finish before restart.""" @@ -4408,7 +4410,7 @@ def test_045_2(self): @TestScenario -@Name("test_046. Metrics for clickhouse-operator") +@Name("test_046# Metrics for clickhouse-operator") def test_046(self): """Check that clickhouse-operator creates metrics for reconcile and other clickhouse-operator events.""" create_shell_namespace_clickhouse_template() @@ -4519,7 +4521,7 @@ def check_metrics(metric_names): @TestScenario @Requirements(RQ_SRS_026_ClickHouseOperator_CustomResource_Spec_Configuration_Clusters_Cluster_Layout_Shards_Weight("1.0")) -@Name("test_047. 
Zero weighted shard") +@Name("test_047# Zero weighted shard") def test_047(self): """Check that clickhouse-operator supports specifying shard weight as 0 and check that data not inserted into zero-weighted shard in distributed table.""" @@ -4586,7 +4588,7 @@ def test_047(self): @TestScenario -@Name("test_048. Clickhouse-keeper") +@Name("test_048# Clickhouse-keeper") @Requirements(RQ_SRS_026_ClickHouseOperator_CustomResource_Kind_ClickHouseKeeperInstallation("1.0"), RQ_SRS_026_ClickHouseOperator_CustomResource_ClickHouseKeeperInstallation_volumeClaimTemplates("1.0")) def test_048(self): @@ -4613,7 +4615,7 @@ def test_048(self): @TestScenario -@Name("test_049. Clickhouse-keeper upgrade") +@Name("test_049# Clickhouse-keeper upgrade") def test_049(self): """Check that clickhouse-operator support upgrading clickhouse-keeper version when clickhouse-keeper defined with ClickHouseKeeperInstallation.""" @@ -4662,7 +4664,7 @@ def test_049(self): @TestScenario -@Name("test_050. Test metrics exclusion in operator config") +@Name("test_050# Test metrics exclusion in operator config") def test_050(self): create_shell_namespace_clickhouse_template() with Given("Operator configuration is installed"): @@ -4723,7 +4725,7 @@ def test_labels(chi, type, key, value): @TestScenario -@Name("test_051. Test CHK upgrade from 0.23.x operator version") +@Name("test_051# Test CHK upgrade from 0.23.x operator version") @Tags("NO_PARALLEL") def test_051(self): with Then("Skip it. test_051_1 does a better job"): @@ -4810,7 +4812,7 @@ def test_051(self): @TestScenario -@Name("test_051_1. Test CHK upgrade from 0.23.x operator version") +@Name("test_051_1# Test CHK upgrade from 0.23.x operator version") @Tags("NO_PARALLEL") def test_051_1(self): version_from = "0.23.7" @@ -4911,7 +4913,7 @@ def test_051_1(self): delete_test_namespace() @TestScenario -@Name("test_052. Clickhouse-keeper scale-up/scale-down") +@Name("test_052# Clickhouse-keeper scale-up/scale-down") def test_052(self): """Check that clickhouse-operator support scale-up/scale-down without service interruption""" diff --git a/tests/helpers/argparser.py b/tests/helpers/argparser.py index 7390fef3f..2d23ca60f 100644 --- a/tests/helpers/argparser.py +++ b/tests/helpers/argparser.py @@ -3,7 +3,7 @@ def argparser(parser): parser.add_argument( "--native", action="store_true", - help="run tests without docker-compose, require only working kubectl + python", + help="run tests without docker compose, require only working kubectl + python", default=False, ) parser.add_argument( diff --git a/tests/helpers/cluster.py b/tests/helpers/cluster.py index 71a131410..a29064a20 100644 --- a/tests/helpers/cluster.py +++ b/tests/helpers/cluster.py @@ -18,7 +18,7 @@ def __exit__(self, type, value, traceback): # to terminate any open shell commands. 
        # This is needed for example
         # to solve a problem with
-        # 'docker-compose exec {name} bash --noediting'
+        # 'docker compose exec {name} bash --noediting'
         # that does not clean up open bash processes
         # if not exited normally
         for i in range(10):
@@ -32,13 +32,13 @@ def __exit__(self, type, value, traceback):


 class Cluster(object):
-    """Simple object around docker-compose cluster."""
+    """Simple object around docker compose cluster."""

     def __init__(self, configs_dir=None):
         self.environ = {}
         self.configs_dir = configs_dir
-        self.docker_compose = "docker-compose"
+        self.docker_compose = "docker compose"
         self.shell = Shell()

         frame = inspect.currentframe().f_back
@@ -62,7 +62,7 @@ def __init__(self, configs_dir=None):
         )

     def __enter__(self):
-        with Given("docker-compose cluster"):
+        with Given("docker compose cluster"):
             self.up()
         return self
@@ -71,11 +71,11 @@ def __exit__(self, type, value, traceback):
             self.down()

     def down(self, timeout=3600):
-        """Bring cluster down by executing docker-compose down."""
+        """Bring cluster down by executing docker compose down."""
         return self.shell(f"{self.docker_compose} down --timeout {timeout} -v --remove-orphans")

     def up(self, timeout=3600):
-        with Given("docker-compose"):
+        with Given("docker compose"):
             max_attempts = 5
             max_up_attempts = 1
@@ -84,7 +84,7 @@ def up(self, timeout=3600):
                 with By("checking if any containers are already running"):
                     self.shell(f"set -o pipefail && {self.docker_compose} ps | tee")

-                with And("executing docker-compose down just in case it is up"):
+                with And("executing docker compose down just in case it is up"):
                     cmd = self.shell(
                         f"set -o pipefail && {self.docker_compose} down --timeout={timeout} -v --remove-orphans 2>&1 | tee"
                     )
@@ -94,7 +94,7 @@ def up(self, timeout=3600):
                 with And("checking if any containers are still left running"):
                     self.shell(f"set -o pipefail && {self.docker_compose} ps | tee")

-                with And("executing docker-compose up"):
+                with And("executing docker compose up"):
                     for up_attempt in range(max_up_attempts):
                         with By(f"attempt {up_attempt}/{max_up_attempts}"):
                             cmd = self.shell(
@@ -113,4 +113,4 @@ def up(self, timeout=3600):
                 break

         if cmd.exitcode != 0 or "is unhealthy" in cmd.output or "Exit" in ps_cmd.output:
-            fail("could not bring up docker-compose cluster")
+            fail("could not bring up docker compose cluster")
diff --git a/tests/image/build_docker.sh b/tests/image/build_docker.sh
index 43aa020d9..bd8d0832a 100755
--- a/tests/image/build_docker.sh
+++ b/tests/image/build_docker.sh
@@ -7,22 +7,22 @@ OPERATOR_IMAGE="altinity/clickhouse-operator:${OPERATOR_VERSION}"
 OPERATOR_IMAGE_OLD="altinity/clickhouse-operator:${OPERATOR_VERSION_OLD}"
 METRICS_EXPORTER_IMAGE="altinity/metrics-exporter:${OPERATOR_VERSION}"
 METRICS_EXPORTER_IMAGE_OLD="altinity/metrics-exporter:${OPERATOR_VERSION_OLD}"
-CLICKHOUSE_BACKUP_IMAGE="altinity/clickhouse-backup:2.4.15"
-CLICKHOUSE_IMAGE=${CLICKHOUSE_IMAGE:="clickhouse/clickhouse-server:23.8"}
-CLICKHOUSE_IMAGE_OLD=${CLICKHOUSE_IMAGE_OLD:="clickhouse/clickhouse-server:23.3"}
+CLICKHOUSE_BACKUP_IMAGE="altinity/clickhouse-backup:latest"
+CLICKHOUSE_IMAGE=${CLICKHOUSE_IMAGE:="clickhouse/clickhouse-server:24.8"}
+CLICKHOUSE_IMAGE_OLD=${CLICKHOUSE_IMAGE_OLD:="clickhouse/clickhouse-server:24.3"}
 CLICKHOUSE_IMAGE_LATEST=${CLICKHOUSE_IMAGE_LATEST:="clickhouse/clickhouse-server:latest"}
 CLICKHOUSE_OPERATOR_TESTS_IMAGE=${CLICKHOUSE_OPERATOR_TESTS_IMAGE:="registry.gitlab.com/altinity-public/container-images/clickhouse-operator-test-runner:latest"}
 ZOOKEEPER_IMAGE=${ZOOKEEPER_IMAGE:="zookeeper:3.8.4"}

-K8S_VERSION=${K8S_VERSION:=1.28.5}
+K8S_VERSION=${K8S_VERSION:=1.30.1}
 MINIKUBE_PRELOADED_TARBALL="preloaded-images-k8s-v18-v${K8S_VERSION}-docker-overlay2-amd64.tar.lz4"
-MINIKUBE_KICBASE_IMAGE=${MINIKUBE_KICBASE_IMAGE:-"gcr.io/k8s-minikube/kicbase:v0.0.42"}
-MINIKUBE_STORAGE_IMAGE=${MINIKUBE_STORAGE_IMAGE:="gcr.io/k8s-minikube/storage-provisioner:v20210514"}
+MINIKUBE_KICBASE_IMAGE=${MINIKUBE_KICBASE_IMAGE:-"gcr.io/k8s-minikube/kicbase:v0.0.45"}
+MINIKUBE_STORAGE_IMAGE=${MINIKUBE_STORAGE_IMAGE:="gcr.io/k8s-minikube/storage-provisioner:latest"}

-MINIO_IMAGE=${MINIO_IMAGE:="minio/minio:RELEASE.2021-06-17T00-10-46Z"}
+MINIO_IMAGE=${MINIO_IMAGE:="minio/minio:latest"}
 MINIO_CONSOLE_IMAGE=${MINIO_CONSOLE_IMAGE:="minio/console:latest"}
 MINIO_CLIENT_IMAGE=${MINIO_CLIENT_IMAGE:="minio/mc:latest"}
-MINIO_OPERATOR_IMAGE=${MINIO_OPERATOR_IMAGE:="minio/operator:v4.1.3"}
+MINIO_OPERATOR_IMAGE=${MINIO_OPERATOR_IMAGE:="minio/operator:latest"}

 PROMETHEUS_RELOADER_IMAGE=${PROMETHEUS_RELOADER_IMAGE:="quay.io/prometheus-operator/prometheus-config-reloader:v0.68.0"}
 PROMETHEUS_OPERATOR_IMAGE=${PROMETHEUS_OPERATOR_IMAGE:="quay.io/prometheus-operator/prometheus-operator:v0.68.0"}
diff --git a/tests/regression.py b/tests/regression.py
index 16ac1344a..338a18773 100755
--- a/tests/regression.py
+++ b/tests/regression.py
@@ -7,31 +7,31 @@
 xfails = {
     # test_operator.py
-    "/regression/e2e.test_operator/test_008*": [(Fail, "Test 008 sometimes fails due to unknown reasons")],
-    "/regression/e2e.test_operator/test_030:": [(Fail, "FIXME: Test 030 started to fail in 0.24.1")],
-    "/regression/e2e.test_operator/test_032:": [(Fail, "Test 032 sometimes fails due to unknown reasons")],
+    "/regression/e2e?test_operator/test_008*": [(Fail, "Test 008 sometimes fails due to unknown reasons")],
+    "/regression/e2e?test_operator/test_030:": [(Fail, "FIXME: Test 030 started to fail in 0.24.1")],
+    "/regression/e2e?test_operator/test_032:": [(Fail, "Test 032 sometimes fails due to unknown reasons")],
     # test_clickhouse.py
-    "/regression/e2e.test_clickhouse/test_ch_001*": [(Fail, "Insert Quorum test need to refactoring")],
+    "/regression/e2e?test_clickhouse/test_ch_001*": [(Fail, "Insert Quorum test needs refactoring")],
     # test_metrics_alerts.py
-    # "/regression/e2e.test_metrics_alerts/test_clickhouse_keeper_alerts*": [
+    # "/regression/e2e?test_metrics_alerts/test_clickhouse_keeper_alerts*": [
     #     (Fail, "clickhouse-keeper wrong prometheus endpoint format, look https://github.com/ClickHouse/ClickHouse/issues/46136")
     # ],
     # test_keeper.py
-    # "/regression/e2e.test_keeper/test_clickhouse_keeper_rescale*": [
-    #     (Fail, "need `ruok` before quorum https://github.com/ClickHouse/ClickHouse/issues/35464, need apply file config instead use commited data for quorum https://github.com/ClickHouse/ClickHouse/issues/35465. --force-recovery useless https://github.com/ClickHouse/ClickHouse/issues/37434"),
-    # ],
-    # "/regression/e2e.test_metrics_alerts/test_clickhouse_dns_errors*": [
+    "/regression/e2e?test_keeper/test_*_chk*": [
+        (Fail, "needs proper ClickHouseKeeperInstallation scale-up and scale-down implementation"),
+    ],
+    # "/regression/e2e?test_metrics_alerts/test_clickhouse_dns_errors*": [
     #     (Fail, "DNSError behavior changed on 21.9, look https://github.com/ClickHouse/ClickHouse/issues/29624")
     # ],
     # test_keeper.py
-    "/regression/e2e.test_keeper/test_zookeeper_operator_probes_workload*": [
-        (
-            Fail,
-            "zookeeper liveness probe doesn't work, wait when https://github.com/pravega/zookeeper-operator/pull/476 will merge",
-        )
-    ],
-    # "/regression/e2e.test_keeper/test_clickhouse_keeper_probes_workload*": [
+    # "/regression/e2e?test_keeper/test_zookeeper_operator_probes_workload*": [
+    #     (
+    #         Fail,
+    #         "zookeeper liveness probe doesn't work, wait when https://github.com/pravega/zookeeper-operator/pull/476 will merge",
+    #     )
+    # ],
+    # "/regression/e2e?test_keeper/test_clickhouse_keeper_probes_workload*": [
     #     (Fail, "clickhouse-keeper fail after insert 10000 parts, look https://github.com/ClickHouse/ClickHouse/issues/35712")
     # ],
 }