From 84728ec179e43a344a9d4c572d6edb43f345ba0d Mon Sep 17 00:00:00 2001
From: zyy17
Date: Fri, 20 Sep 2024 12:25:49 +0800
Subject: [PATCH] test: add standalone wal e2e

---
 controllers/greptimedbstandalone/deployer.go  |   6 +-
 .../greptimedbcluster/test_basic_cluster.go   |   2 +-
 .../test_cluster_enable_flow.go               |   2 +-
 .../test_cluster_enable_remote_wal.go         |   2 +-
 .../test_cluster_standalone_wal.go            | 111 ++++++++++++++++++
 .../test_basic_standalone.go                  |   2 +-
 tests/e2e/helper/helper.go                    |  21 ++--
 .../cluster/standalone-wal/cluster.yaml       |  25 ++++
 8 files changed, 159 insertions(+), 12 deletions(-)
 create mode 100644 tests/e2e/greptimedbcluster/test_cluster_standalone_wal.go
 create mode 100644 tests/e2e/testdata/resources/cluster/standalone-wal/cluster.yaml

diff --git a/controllers/greptimedbstandalone/deployer.go b/controllers/greptimedbstandalone/deployer.go
index 24942834..12d0ecf6 100644
--- a/controllers/greptimedbstandalone/deployer.go
+++ b/controllers/greptimedbstandalone/deployer.go
@@ -158,8 +158,12 @@ func (d *StandaloneDeployer) deleteStorage(ctx context.Context, namespace, name
 		constant.GreptimeDBComponentName: common.ResourceName(name, v1alpha1.StandaloneKind),
 	}
 
+	if additionalLabels != nil {
+		matachedLabels = util.MergeStringMap(matachedLabels, additionalLabels)
+	}
+
 	selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{
-		MatchLabels: util.MergeStringMap(matachedLabels, additionalLabels),
+		MatchLabels: matachedLabels,
 	})
 	if err != nil {
 		return err
diff --git a/tests/e2e/greptimedbcluster/test_basic_cluster.go b/tests/e2e/greptimedbcluster/test_basic_cluster.go
index 784021c3..c78a83d1 100644
--- a/tests/e2e/greptimedbcluster/test_basic_cluster.go
+++ b/tests/e2e/greptimedbcluster/test_basic_cluster.go
@@ -87,7 +87,7 @@ func TestBasicCluster(ctx context.Context, h *helper.Helper) {
 	}, helper.DefaultTimeout, time.Second).Should(HaveOccurred())
 
 	By("The PVC of the datanode should be retained")
-	datanodePVCs, err := h.GetPVCs(ctx, testCluster.Namespace, testCluster.Name, greptimev1alpha1.DatanodeComponentKind)
+	datanodePVCs, err := h.GetPVCs(ctx, testCluster.Namespace, testCluster.Name, greptimev1alpha1.DatanodeComponentKind, nil)
 	Expect(err).NotTo(HaveOccurred(), "failed to get datanode PVCs")
 	Expect(int32(len(datanodePVCs))).To(Equal(*testCluster.Spec.Datanode.Replicas), "the number of datanode PVCs should be equal to the number of datanode replicas")
 
diff --git a/tests/e2e/greptimedbcluster/test_cluster_enable_flow.go b/tests/e2e/greptimedbcluster/test_cluster_enable_flow.go
index b3804267..a5801283 100644
--- a/tests/e2e/greptimedbcluster/test_cluster_enable_flow.go
+++ b/tests/e2e/greptimedbcluster/test_cluster_enable_flow.go
@@ -87,7 +87,7 @@ func TestClusterEnableFlow(ctx context.Context, h *helper.Helper) {
 	}, helper.DefaultTimeout, time.Second).Should(HaveOccurred())
 
 	By("The PVC of the datanode should be retained")
-	datanodePVCs, err := h.GetPVCs(ctx, testCluster.Namespace, testCluster.Name, greptimev1alpha1.DatanodeComponentKind)
+	datanodePVCs, err := h.GetPVCs(ctx, testCluster.Namespace, testCluster.Name, greptimev1alpha1.DatanodeComponentKind, nil)
 	Expect(err).NotTo(HaveOccurred(), "failed to get datanode PVCs")
 	Expect(int32(len(datanodePVCs))).To(Equal(*testCluster.Spec.Datanode.Replicas), "the number of datanode PVCs should be equal to the number of datanode replicas")
 
diff --git a/tests/e2e/greptimedbcluster/test_cluster_enable_remote_wal.go b/tests/e2e/greptimedbcluster/test_cluster_enable_remote_wal.go
index 05617c12..80f636c8 100644
--- a/tests/e2e/greptimedbcluster/test_cluster_enable_remote_wal.go
+++ b/tests/e2e/greptimedbcluster/test_cluster_enable_remote_wal.go
@@ -87,7 +87,7 @@ func TestClusterEnableRemoteWal(ctx context.Context, h *helper.Helper) {
 	}, helper.DefaultTimeout, time.Second).Should(HaveOccurred())
 
 	By("The PVC of the datanode should be retained")
-	datanodePVCs, err := h.GetPVCs(ctx, testCluster.Namespace, testCluster.Name, greptimev1alpha1.DatanodeComponentKind)
+	datanodePVCs, err := h.GetPVCs(ctx, testCluster.Namespace, testCluster.Name, greptimev1alpha1.DatanodeComponentKind, nil)
 	Expect(err).NotTo(HaveOccurred(), "failed to get datanode PVCs")
 	Expect(int32(len(datanodePVCs))).To(Equal(*testCluster.Spec.Datanode.Replicas), "the number of datanode PVCs should be equal to the number of datanode replicas")
 
diff --git a/tests/e2e/greptimedbcluster/test_cluster_standalone_wal.go b/tests/e2e/greptimedbcluster/test_cluster_standalone_wal.go
new file mode 100644
index 00000000..7af21c4a
--- /dev/null
+++ b/tests/e2e/greptimedbcluster/test_cluster_standalone_wal.go
@@ -0,0 +1,111 @@
+// Copyright 2024 Greptime Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package greptimedbcluster
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	greptimev1alpha1 "github.com/GreptimeTeam/greptimedb-operator/apis/v1alpha1"
+	"github.com/GreptimeTeam/greptimedb-operator/controllers/common"
+	"github.com/GreptimeTeam/greptimedb-operator/tests/e2e/helper"
+)
+
+// TestClusterStandaloneWAL tests a cluster that uses a dedicated file storage for its raft-engine WAL.
+func TestClusterStandaloneWAL(ctx context.Context, h *helper.Helper) {
+	const (
+		testCRFile  = "./testdata/resources/cluster/standalone-wal/cluster.yaml"
+		testSQLFile = "./testdata/sql/cluster/partition.sql"
+	)
+
+	By(fmt.Sprintf("greptimecluster test with CR file %s and SQL file %s", testCRFile, testSQLFile))
+
+	testCluster := new(greptimev1alpha1.GreptimeDBCluster)
+	err := h.LoadCR(testCRFile, testCluster)
+	Expect(err).NotTo(HaveOccurred(), "failed to load greptimedbcluster yaml file")
+
+	err = h.Create(ctx, testCluster)
+	Expect(err).NotTo(HaveOccurred(), "failed to create greptimedbcluster")
+
+	By("Check the status of testCluster")
+	Eventually(func() error {
+		clusterPhase, err := h.GetPhase(ctx, testCluster.Namespace, testCluster.Name, new(greptimev1alpha1.GreptimeDBCluster))
+		if err != nil {
+			return err
+		}
+
+		if clusterPhase != greptimev1alpha1.PhaseRunning {
+			return fmt.Errorf("cluster is not running")
+		}
+
+		return nil
+	}, helper.DefaultTimeout, time.Second).ShouldNot(HaveOccurred())
+
+	By("Execute distributed SQL test")
+	frontendAddr, err := h.PortForward(ctx, testCluster.Namespace, common.ResourceName(testCluster.Name, greptimev1alpha1.FrontendComponentKind), int(testCluster.Spec.PostgreSQLPort))
+	Expect(err).NotTo(HaveOccurred(), "failed to port forward frontend service")
+	Eventually(func() error {
+		conn, err := net.Dial("tcp", frontendAddr)
+		if err != nil {
+			return err
+		}
+		conn.Close()
+		return nil
+	}, helper.DefaultTimeout, time.Second).ShouldNot(HaveOccurred())
+
+	err = h.RunSQLTest(ctx, frontendAddr, testSQLFile)
+	Expect(err).NotTo(HaveOccurred(), "failed to run sql test")
+
+	By("Kill the port forwarding process")
+	h.KillPortForwardProcess()
+
+	By("Delete cluster")
+	err = h.Delete(ctx, testCluster)
+	Expect(err).NotTo(HaveOccurred(), "failed to delete cluster")
+	Eventually(func() error {
+		// The cluster will be deleted eventually.
+		return h.Get(ctx, client.ObjectKey{Name: testCluster.Name, Namespace: testCluster.Namespace}, testCluster)
+	}, helper.DefaultTimeout, time.Second).Should(HaveOccurred())
+
+	By("The PVC of the datanode should be retained")
+	datanodePVCs, err := h.GetPVCs(ctx, testCluster.Namespace, testCluster.Name, greptimev1alpha1.DatanodeComponentKind, common.DatanodeFileStorageLabels)
+	Expect(err).NotTo(HaveOccurred(), "failed to get datanode PVCs")
+	Expect(int32(len(datanodePVCs))).To(Equal(*testCluster.Spec.Datanode.Replicas), "the number of datanode PVCs should be equal to the number of datanode replicas")
+
+	By("The PVC of the WAL should be deleted")
+	Eventually(func() error {
+		walPVCs, err := h.GetPVCs(ctx, testCluster.Namespace, testCluster.Name, greptimev1alpha1.DatanodeComponentKind, common.WALFileStorageLabels)
+		if err != nil {
+			return err
+		}
+		if len(walPVCs) != 0 {
+			return fmt.Errorf("the number of WAL PVCs should be 0")
+		}
+		return nil
+	}, helper.DefaultTimeout, time.Second).ShouldNot(HaveOccurred())
+
+	By("Remove the PVC of the datanode")
+	for _, pvc := range datanodePVCs {
+		err = h.Delete(ctx, &pvc)
+		Expect(err).NotTo(HaveOccurred(), "failed to delete datanode PVC")
+	}
+}
diff --git a/tests/e2e/greptimedbstandalone/test_basic_standalone.go b/tests/e2e/greptimedbstandalone/test_basic_standalone.go
index 096b320c..2478ecad 100644
--- a/tests/e2e/greptimedbstandalone/test_basic_standalone.go
+++ b/tests/e2e/greptimedbstandalone/test_basic_standalone.go
@@ -87,7 +87,7 @@ func TestBasicStandalone(ctx context.Context, h *helper.Helper) {
 	}, helper.DefaultTimeout, time.Second).Should(HaveOccurred())
 
 	By("The PVC of the database should be retained")
-	dataPVCs, err := h.GetPVCs(ctx, testStandalone.Namespace, testStandalone.Name, greptimev1alpha1.StandaloneKind)
+	dataPVCs, err := h.GetPVCs(ctx, testStandalone.Namespace, testStandalone.Name, greptimev1alpha1.StandaloneKind, nil)
 	Expect(err).NotTo(HaveOccurred(), "failed to get data PVCs")
 	Expect(len(dataPVCs)).To(Equal(1), "the number of datanode PVCs should be equal to 1")
 
diff --git a/tests/e2e/helper/helper.go b/tests/e2e/helper/helper.go
index b4a63089..72bfaefa 100644
--- a/tests/e2e/helper/helper.go
+++ b/tests/e2e/helper/helper.go
@@ -23,6 +23,7 @@ import (
 	"os/exec"
 	"time"
 
+	"github.com/GreptimeTeam/greptimedb-operator/pkg/util"
 	"github.com/jackc/pgx/v5"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -124,24 +125,30 @@ func (h *Helper) GetPhase(ctx context.Context, namespace, name string, object cl
 }
 
 // GetPVCs returns the PVC list of the given component.
-func (h *Helper) GetPVCs(ctx context.Context, namespace, name string, kind greptimev1alpha1.ComponentKind) ([]corev1.PersistentVolumeClaim, error) {
+func (h *Helper) GetPVCs(ctx context.Context, namespace, name string, kind greptimev1alpha1.ComponentKind, additionalLabels map[string]string) ([]corev1.PersistentVolumeClaim, error) {
+	matachedLabels := map[string]string{
+		constant.GreptimeDBComponentName: common.ResourceName(name, kind),
+	}
+
+	if additionalLabels != nil {
+		matachedLabels = util.MergeStringMap(matachedLabels, additionalLabels)
+	}
+
 	selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{
-		MatchLabels: map[string]string{
-			constant.GreptimeDBComponentName: common.ResourceName(name, kind),
-		},
+		MatchLabels: matachedLabels,
 	})
 	if err != nil {
 		return nil, err
 	}
 
-	pvcList := new(corev1.PersistentVolumeClaimList)
+	claims := new(corev1.PersistentVolumeClaimList)
 
-	if err = h.List(ctx, pvcList, client.InNamespace(namespace),
+	if err = h.List(ctx, claims, client.InNamespace(namespace),
 		client.MatchingLabelsSelector{Selector: selector}); err != nil {
 		return nil, err
 	}
 
-	return pvcList.Items, nil
+	return claims.Items, nil
 }
 
 // CleanEtcdData cleans up all data in etcd by executing the etcdctl command in the given pod.
diff --git a/tests/e2e/testdata/resources/cluster/standalone-wal/cluster.yaml b/tests/e2e/testdata/resources/cluster/standalone-wal/cluster.yaml
new file mode 100644
index 00000000..bdbc167b
--- /dev/null
+++ b/tests/e2e/testdata/resources/cluster/standalone-wal/cluster.yaml
@@ -0,0 +1,25 @@
+apiVersion: greptime.io/v1alpha1
+kind: GreptimeDBCluster
+metadata:
+  name: cluster-with-standalone-wal
+  namespace: default
+spec:
+  base:
+    main:
+      image: greptime/greptimedb:latest
+  frontend:
+    replicas: 1
+  meta:
+    replicas: 1
+    etcdEndpoints:
+      - "etcd.etcd-cluster:2379"
+  datanode:
+    replicas: 1
+  wal:
+    raftEngine:
+      fs:
+        name: wal
+        storageClassName: standard
+        storageSize: 5Gi
+        mountPath: /wal
+        storageRetainPolicy: Delete # The WAL data will be deleted after the cluster is destroyed.
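
Usage sketch: a minimal example of how the extended GetPVCs signature might be called from another e2e test, assuming the common.DatanodeFileStorageLabels and common.WALFileStorageLabels selectors referenced in test_cluster_standalone_wal.go. The function name checkWALPVCsDeleted is hypothetical and shown only for illustration; imports are omitted and would match those of the new test file.

// checkWALPVCsDeleted mirrors the new e2e assertions: with storageRetainPolicy "Delete"
// on the WAL file storage, the WAL PVCs should be gone after the cluster is deleted,
// while the datanode data PVCs are retained.
func checkWALPVCsDeleted(ctx context.Context, h *helper.Helper, cluster *greptimev1alpha1.GreptimeDBCluster) error {
	// Passing nil as additionalLabels keeps the pre-change behavior: every PVC of the component matches.
	allPVCs, err := h.GetPVCs(ctx, cluster.Namespace, cluster.Name, greptimev1alpha1.DatanodeComponentKind, nil)
	if err != nil {
		return err
	}

	// The additional labels narrow the selector to WAL-only PVCs.
	walPVCs, err := h.GetPVCs(ctx, cluster.Namespace, cluster.Name, greptimev1alpha1.DatanodeComponentKind, common.WALFileStorageLabels)
	if err != nil {
		return err
	}

	if len(walPVCs) != 0 {
		return fmt.Errorf("expected 0 WAL PVCs, got %d (out of %d datanode PVCs in total)", len(walPVCs), len(allPVCs))
	}
	return nil
}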