diff --git a/tests/e2e/csi_cns_telemetry_statefulsets.go b/tests/e2e/csi_cns_telemetry_statefulsets.go index 37d1477e08..0c1f32d96c 100644 --- a/tests/e2e/csi_cns_telemetry_statefulsets.go +++ b/tests/e2e/csi_cns_telemetry_statefulsets.go @@ -161,7 +161,9 @@ var _ = ginkgo.Describe("[csi-block-vanilla] [csi-file-vanilla] [csi-supervisor] replicas := *(statefulset.Spec.Replicas) // Waiting for pods status to be Ready. fss.WaitForStatusReadyReplicas(ctx, client, statefulset, replicas) - gomega.Expect(fss.CheckMount(ctx, client, statefulset, mountPath)).NotTo(gomega.HaveOccurred()) + if !windowsEnv { + gomega.Expect(fss.CheckMount(ctx, client, statefulset, mountPath)).NotTo(gomega.HaveOccurred()) + } ssPodsBeforeScaleDown := fss.GetPodList(ctx, client, statefulset) gomega.Expect(ssPodsBeforeScaleDown.Items).NotTo(gomega.BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) diff --git a/tests/e2e/csi_snapshot_basic.go b/tests/e2e/csi_snapshot_basic.go index 8ac4849255..19583ac29b 100644 --- a/tests/e2e/csi_snapshot_basic.go +++ b/tests/e2e/csi_snapshot_basic.go @@ -2260,7 +2260,9 @@ var _ = ginkgo.Describe("Volume Snapshot Basic Test", func() { // Waiting for pods status to be Ready fss.WaitForStatusReadyReplicas(ctx, client, statefulset, replicas) - gomega.Expect(fss.CheckMount(ctx, client, statefulset, mountPath)).NotTo(gomega.HaveOccurred()) + if !windowsEnv { + gomega.Expect(fss.CheckMount(ctx, client, statefulset, mountPath)).NotTo(gomega.HaveOccurred()) + } ssPodsBeforeScaleDown := fss.GetPodList(ctx, client, statefulset) gomega.Expect(ssPodsBeforeScaleDown.Items).NotTo(gomega.BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) diff --git a/tests/e2e/csi_snapshot_utils.go b/tests/e2e/csi_snapshot_utils.go index aa86422563..c377066aa2 100644 --- a/tests/e2e/csi_snapshot_utils.go +++ b/tests/e2e/csi_snapshot_utils.go @@ -649,15 +649,25 @@ func verifyVolumeRestoreOperation(ctx context.Context, client clientset.Interfac isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle2, vmUUID) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node") - + var cmd []string ginkgo.By("Verify the volume is accessible and Read/write is possible") - cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", - "cat /mnt/volume1/Pod1.html "} + if windowsEnv { + cmd = []string{"exec", pod.Name, "--namespace=" + namespace, "powershell.exe", "cat /mnt/volume1/Pod1.html"} + } else { + cmd = []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", + "cat /mnt/volume1/Pod1.html "} + } output := e2ekubectl.RunKubectlOrDie(namespace, cmd...) gomega.Expect(strings.Contains(output, "Hello message from Pod1")).NotTo(gomega.BeFalse()) - wrtiecmd := []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", - "echo 'Hello message from test into Pod1' > /mnt/volume1/Pod1.html"} + var wrtiecmd []string + if windowsEnv { + wrtiecmd = []string{"exec", pod.Name, "--namespace=" + namespace, "powershell.exe", "Add-Content /mnt/volume1/Pod1.html 'Hello message from test into Pod1'"} + } else { + wrtiecmd = []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", + "echo 'Hello message from test into Pod1' > /mnt/volume1/Pod1.html"} + } + e2ekubectl.RunKubectlOrDie(namespace, wrtiecmd...) output = e2ekubectl.RunKubectlOrDie(namespace, cmd...) 
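The csi_snapshot_utils.go hunk above repeats the same windowsEnv branch for the read command and again for the write command. A minimal sketch (not part of this change; the helper name is hypothetical, and it assumes the package-level windowsEnv flag and the e2ekubectl import these tests already use) of how the read side could be centralized:

```go
// podReadFileCmd builds the kubectl args for reading a file inside a pod,
// switching on the windowsEnv flag the same way the hunk above does.
func podReadFileCmd(namespace, podName, filePath string) []string {
	if windowsEnv {
		// Windows test images expose powershell.exe; cat aliases Get-Content.
		return []string{"exec", podName, "--namespace=" + namespace,
			"powershell.exe", "cat " + filePath}
	}
	return []string{"exec", podName, "--namespace=" + namespace,
		"--", "/bin/sh", "-c", "cat " + filePath}
}
```

A matching writer would follow the same shape, swapping in Add-Content on Windows and a shell redirect on Linux, so the call sites above reduce to output := e2ekubectl.RunKubectlOrDie(namespace, podReadFileCmd(namespace, pod.Name, "/mnt/volume1/Pod1.html")...).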
gomega.Expect(strings.Contains(output, "Hello message from test into Pod1")).NotTo(gomega.BeFalse()) diff --git a/tests/e2e/csi_static_provisioning_basic.go b/tests/e2e/csi_static_provisioning_basic.go index aada0ccc37..c3421dc749 100644 --- a/tests/e2e/csi_static_provisioning_basic.go +++ b/tests/e2e/csi_static_provisioning_basic.go @@ -127,8 +127,10 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } if guestCluster { - svcClient, svNamespace := getSvcClientAndNamespace() - setResourceQuota(svcClient, svNamespace, rqLimit) + // Get a config to talk to the apiserver + restConfig := getRestConfigClient() + _, svNamespace := getSvcClientAndNamespace() + setStoragePolicyQuota(ctx, restConfig, storagePolicyName, svNamespace, rqLimit) } if os.Getenv(envFullSyncWaitTime) != "" { @@ -181,7 +183,6 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { if guestCluster { svcClient, svNamespace := getSvcClientAndNamespace() - setResourceQuota(svcClient, svNamespace, defaultrqLimit) dumpSvcNsEventsOnTestFailure(svcClient, svNamespace) } if supervisorCluster { @@ -212,7 +213,8 @@ var _ = ginkgo.Describe("Basic Static Provisioning", func() { framework.Logf("storageclass name :%s", storageclass.GetName()) ginkgo.By("create resource quota") - setStoragePolicyQuota(ctx, restConfig, storagePolicyName, namespace, rqLimit) + // setStoragePolicyQuota(ctx, restConfig, storagePolicyName, namespace, rqLimit) + createResourceQuota(client, namespace, rqLimit, storagePolicyName) return restConfig, storageclass, profileID } diff --git a/tests/e2e/e2e_common.go b/tests/e2e/e2e_common.go index 5e5b7650db..b54769948e 100644 --- a/tests/e2e/e2e_common.go +++ b/tests/e2e/e2e_common.go @@ -136,11 +136,12 @@ const ( fullSyncFss = "trigger-csi-fullsync" gcNodeUser = "vmware-system-user" gcKubeConfigPath = "GC_KUBE_CONFIG" + gcSshKey = "TEST-CLUSTER-SSH-KEY" healthGreen = "green" healthRed = "red" healthStatusAccessible = "accessible" healthStatusInAccessible = "inaccessible" - healthStatusWaitTime = 2 * time.Minute + healthStatusWaitTime = 3 * time.Minute hostdServiceName = "hostd" invalidFSType = "ext10" k8sPodTerminationTimeOut = 7 * time.Minute diff --git a/tests/e2e/fullsync_test_for_block_volume.go b/tests/e2e/fullsync_test_for_block_volume.go index bdb510b3d4..31554cc896 100644 --- a/tests/e2e/fullsync_test_for_block_volume.go +++ b/tests/e2e/fullsync_test_for_block_volume.go @@ -841,8 +841,8 @@ var _ bool = ginkgo.Describe("full-sync-test", func() { // TODO: Replace static wait with polling ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow vsan-health to completely shutdown", - vsanHealthServiceWaitTime)) - time.Sleep(time.Duration(vsanHealthServiceWaitTime) * time.Second) + oneMinuteWaitTimeInSeconds)) + time.Sleep(time.Duration(oneMinuteWaitTimeInSeconds) * time.Second) ginkgo.By("when vsan-health is stopped, delete pod1") err = fpod.DeletePodWithWait(ctx, client, pod) @@ -910,6 +910,7 @@ var _ bool = ginkgo.Describe("full-sync-test", func() { ginkgo.By(fmt.Sprintf("Waiting for CNS volume %s to be deleted", volHandle)) err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) diff --git a/tests/e2e/gc_block_resize_retain_policy.go b/tests/e2e/gc_block_resize_retain_policy.go index 937c9ac301..06604e24c6 100644 --- a/tests/e2e/gc_block_resize_retain_policy.go +++ b/tests/e2e/gc_block_resize_retain_policy.go @@ -92,7 +92,11 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Tests 
with reclaimation po storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) scParameters := make(map[string]string) - scParameters[scParamFsType] = ext4FSType + if windowsEnv { + scParameters[scParamFsType] = ntfsFSType + } else { + scParameters[scParamFsType] = ext4FSType + } // Set resource quota. ginkgo.By("Set Resource quota for GC") svcClient, svNamespace := getSvcClientAndNamespace() @@ -122,8 +126,11 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Tests with reclaimation po pvDeleted = false // Replace second element with pod.Name. - cmd = []string{"exec", "", fmt.Sprintf("--namespace=%v", namespace), - "--", "/bin/sh", "-c", "df -Tkm | grep /mnt/volume1"} + if windowsEnv { + cmd = []string{"exec", "", "--namespace=" + namespace, "powershell.exe", "cat", "/mnt/volume1/fstype.txt"} + } else { + cmd = []string{"exec", "", "--namespace=" + namespace, "--", "/bin/sh", "-c", "df -Tkm | grep /mnt/volume1"} + } // Set up default pandora sync wait time. pandoraSyncWaitTime = defaultPandoraSyncWaitTime @@ -219,10 +226,15 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Tests with reclaimation po ginkgo.By("Verify the volume is accessible and filesystem type is as expected") cmd[1] = pod.Name lastOutput := e2ekubectl.RunKubectlOrDie(namespace, cmd...) - gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + if windowsEnv { + gomega.Expect(strings.Contains(lastOutput, ntfsFSType)).NotTo(gomega.BeFalse()) + } else { + gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + } ginkgo.By("Check filesystem size for mount point /mnt/volume1 before expansion") - originalFsSize, err := getFSSizeMb(f, pod) + // originalFsSize, err := getFSSizeMb(f, pod) + originalFsSize, err := getFileSystemSizeForOsType(f, client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Delete POD. @@ -337,7 +349,11 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Tests with reclaimation po ginkgo.By("Verify after expansion the filesystem type is as expected") cmd[1] = pod.Name lastOutput = e2ekubectl.RunKubectlOrDie(namespace, cmd...) - gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + if windowsEnv { + gomega.Expect(strings.Contains(lastOutput, ntfsFSType)).NotTo(gomega.BeFalse()) + } else { + gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + } ginkgo.By("Waiting for file system resize to finish") pvclaim, err = waitForFSResize(pvclaim, client) @@ -347,7 +363,8 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Tests with reclaimation po expectEqual(len(pvcConditions), 0, "pvc should not have conditions") ginkgo.By("Verify filesystem size for mount point /mnt/volume1 after expansion") - fsSize, err := getFSSizeMb(f, pod) + // fsSize, err := getFSSizeMb(f, pod) + fsSize, err := getFileSystemSizeForOsType(f, client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Filesystem size may be smaller than the size of the block volume. 
// Here since filesystem was already formatted on the original volume, @@ -435,7 +452,11 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Tests with reclaimation po _ = getPVCFromSupervisorCluster(svcPVCName) scParameters := make(map[string]string) - scParameters[scParamFsType] = ext4FSType + if windowsEnv { + scParameters[scParamFsType] = ntfsFSType + } else { + scParameters[scParamFsType] = ext4FSType + } scParameters[svStorageClassName] = storagePolicyName storageclassNewGC, err := createStorageClass(clientNewGc, scParameters, nil, v1.PersistentVolumeReclaimDelete, "", true, "") @@ -468,7 +489,12 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Tests with reclaimation po gomega.Expect(volumeID).NotTo(gomega.BeEmpty()) ginkgo.By("Creating the PV") - pvNew := getPersistentVolumeSpec(svcPVCName, v1.PersistentVolumeReclaimDelete, nil, ext4FSType) + var pvNew *v1.PersistentVolume + if windowsEnv { + pvNew = getPersistentVolumeSpec(svcPVCName, v1.PersistentVolumeReclaimDelete, nil, ntfsFSType) + } else { + pvNew = getPersistentVolumeSpec(svcPVCName, v1.PersistentVolumeReclaimDelete, nil, ext4FSType) + } pvNew.Annotations = pvtemp.Annotations pvNew.Spec.StorageClassName = pvtemp.Spec.StorageClassName pvNew.Spec.CSI = pvtemp.Spec.CSI @@ -557,10 +583,17 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Tests with reclaimation po framework.TestContext.KubeConfig = oldKubeConfig }() - cmd2 = []string{"exec", pod.Name, fmt.Sprintf("--namespace=%v", namespaceNewGC), - "--", "/bin/sh", "-c", "df -Tkm | grep /mnt/volume1"} + if windowsEnv { + cmd2 = []string{"exec", pod.Name, "--namespace=" + namespace, "powershell.exe", "cat", "/mnt/volume1/fstype.txt"} + } else { + cmd2 = []string{"exec", pod.Name, "--namespace=" + namespace, "--", "/bin/sh", "-c", "df -Tkm | grep /mnt/volume1"} + } lastOutput := e2ekubectl.RunKubectlOrDie(namespaceNewGC, cmd2...) - gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + if windowsEnv { + gomega.Expect(strings.Contains(lastOutput, ntfsFSType)).NotTo(gomega.BeFalse()) + } else { + gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + } ginkgo.By("Waiting for file system resize to finish") pvcNew, err = waitForFSResize(pvcNew, clientNewGc) @@ -676,10 +709,15 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Tests with reclaimation po ginkgo.By("Verify the volume is accessible and filesystem type is as expected") cmd[1] = pod.Name lastOutput := e2ekubectl.RunKubectlOrDie(namespace, cmd...) 
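The swap of getFSSizeMb for getFileSystemSizeForOsType in these hunks (and in the later ones) is the same OS dispatch; the helper's definition is not shown in this diff. A minimal sketch of what it presumably looks like, assuming it simply wraps the existing getFSSizeMb and getWindowsFileSystemSize helpers:

```go
// getFileSystemSizeForOsType returns the size of the filesystem backing
// /mnt/volume1 for the given pod, picking the probe that matches the node OS.
// Sketch only; the real definition lives outside this diff.
func getFileSystemSizeForOsType(f *framework.Framework, client clientset.Interface,
	pod *v1.Pod) (int64, error) {
	if windowsEnv {
		// Windows workers are queried over SSH/PowerShell (see the util.go changes below).
		return getWindowsFileSystemSize(client, pod)
	}
	// Linux pods report the mount size via df from inside the pod.
	return getFSSizeMb(f, pod)
}
```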
- gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + if windowsEnv { + gomega.Expect(strings.Contains(lastOutput, ntfsFSType)).NotTo(gomega.BeFalse()) + } else { + gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + } ginkgo.By("Check filesystem size for mount point /mnt/volume1 before expansion") - originalFsSize, err := getFSSizeMb(f, pod) + // originalFsSize, err := getFSSizeMb(f, pod) + originalFsSize, err := getFileSystemSizeForOsType(f, client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -743,7 +781,8 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Tests with reclaimation po expectEqual(len(pvcConditions), 0, "pvc should not have conditions") ginkgo.By("Verify filesystem size for mount point /mnt/volume1 after expansion") - fsSize, err := getFSSizeMb(f, pod) + // fsSize, err := getFSSizeMb(f, pod) + fsSize, err := getFileSystemSizeForOsType(f, client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Filesystem size may be smaller than the size of the block volume. // Here since filesystem was already formatted on the original volume, @@ -884,7 +923,11 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Tests with reclaimation po isGC2PVCreated = false scParameters := make(map[string]string) - scParameters[scParamFsType] = ext4FSType + if windowsEnv { + scParameters[scParamFsType] = ntfsFSType + } else { + scParameters[scParamFsType] = ext4FSType + } scParameters[svStorageClassName] = storagePolicyName storageclassInGC1, err := createStorageClass(client, scParameters, nil, v1.PersistentVolumeReclaimDelete, "", true, "") diff --git a/tests/e2e/gc_block_volume_expansion.go b/tests/e2e/gc_block_volume_expansion.go index 2a36aba5a5..382441b28e 100644 --- a/tests/e2e/gc_block_volume_expansion.go +++ b/tests/e2e/gc_block_volume_expansion.go @@ -86,7 +86,11 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) scParameters := make(map[string]string) - scParameters[scParamFsType] = ext4FSType + if windowsEnv { + scParameters[scParamFsType] = ntfsFSType + } else { + scParameters[scParamFsType] = ext4FSType + } // Set resource quota. ginkgo.By("Set Resource quota for GC") @@ -114,7 +118,11 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { pvcDeleted = false // Replace second element with pod.Name. - cmd = []string{"exec", "", "--namespace=" + namespace, "--", "/bin/sh", "-c", "df -Tkm | grep /mnt/volume1"} + if windowsEnv { + cmd = []string{"exec", "", "--namespace=" + namespace, "powershell.exe", "cat", "/mnt/volume1/fstype.txt"} + } else { + cmd = []string{"exec", "", "--namespace=" + namespace, "--", "/bin/sh", "-c", "df -Tkm | grep /mnt/volume1"} + } // Set up default pandora sync wait time. pandoraSyncWaitTime = defaultPandoraSyncWaitTime @@ -179,6 +187,12 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Creating pod to attach PV to the node") pod, err := createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, execCommand) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + // Delete Pod. 
+ ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) + err := fpod.DeletePodWithWait(ctx, client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s", pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)) @@ -191,10 +205,15 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Verify the volume is accessible and filesystem type is as expected") cmd[1] = pod.Name lastOutput := e2ekubectl.RunKubectlOrDie(namespace, cmd...) - gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + if windowsEnv { + gomega.Expect(strings.Contains(lastOutput, ntfsFSType)).NotTo(gomega.BeFalse()) + } else { + gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + } ginkgo.By("Check filesystem size for mount point /mnt/volume1 before expansion") - originalFsSize, err := getFSSizeMb(f, pod) + // originalFsSize, err := getFSSizeMb(f, pod) + originalFsSize, err := getFileSystemSizeForOsType(f, client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) rand.New(rand.NewSource(time.Now().Unix())) @@ -209,9 +228,13 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { fmt.Println(op) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - - _ = e2ekubectl.RunKubectlOrDie(namespace, "cp", testdataFile, - fmt.Sprintf("%v/%v:/mnt/volume1/testdata", namespace, pod.Name)) + if windowsEnv { + cmdTestData := []string{"exec", pod.Name, "--namespace=" + namespace, "powershell.exe", "$out = New-Object byte[] 536870912; (New-Object Random).NextBytes($out); [System.IO.File]::WriteAllBytes('/mnt/volume1/testdata2.txt', $out)"} + _ = e2ekubectl.RunKubectlOrDie(namespace, cmdTestData...) + } else { + _ = e2ekubectl.RunKubectlOrDie(namespace, "cp", testdataFile, + fmt.Sprintf("%v/%v:/mnt/volume1/testdata", namespace, pod.Name)) + } // Delete POD. ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) @@ -279,6 +302,12 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Creating a new pod to attach PV again to the node") pod, err = createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, execCommand) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + // Delete Pod. + ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s", pod.Name, namespace)) + err := fpod.DeletePodWithWait(ctx, client, pod) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() ginkgo.By(fmt.Sprintf("Verify volume after expansion: %s is attached to the node: %s", pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)) @@ -292,7 +321,11 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Verify after expansion the filesystem type is as expected") cmd[1] = pod.Name lastOutput = e2ekubectl.RunKubectlOrDie(namespace, cmd...) 
- gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + if windowsEnv { + gomega.Expect(strings.Contains(lastOutput, ntfsFSType)).NotTo(gomega.BeFalse()) + } else { + gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + } ginkgo.By("Waiting for file system resize to finish") pvclaim, err = waitForFSResize(pvclaim, client) @@ -302,7 +335,8 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { expectEqual(len(pvcConditions), 0, "pvc should not have conditions") ginkgo.By("Verify filesystem size for mount point /mnt/volume1 after expansion") - fsSize, err := getFSSizeMb(f, pod) + // fsSize, err := getFSSizeMb(f, pod) + fsSize, err := getFileSystemSizeForOsType(f, client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Filesystem size may be smaller than the size of the block volume. // Here since filesystem was already formatted on the original volume, @@ -312,18 +346,29 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { } ginkgo.By("Checking data consistency after PVC resize") - _ = e2ekubectl.RunKubectlOrDie(namespace, "cp", - fmt.Sprintf("%v/%v:/mnt/volume1/testdata", namespace, pod.Name), testdataFile+"_pod") + if windowsEnv { + cmdTestData := []string{"exec", pod.Name, "--namespace=" + namespace, "powershell.exe", "Copy-Item -Path '/mnt/volume1/testdata2.txt' -Destination '/mnt/volume1/testdata2_pod.txt'"} + _ = e2ekubectl.RunKubectlOrDie(namespace, cmdTestData...) + } else { + _ = e2ekubectl.RunKubectlOrDie(namespace, "cp", + fmt.Sprintf("%v/%v:/mnt/volume1/testdata", namespace, pod.Name), testdataFile+"_pod") + } defer func() { op, err = exec.Command("rm", "-f", testdataFile+"_pod").Output() fmt.Println("rm: ", op) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Running diff...") - op, err = exec.Command("diff", testdataFile, testdataFile+"_pod").Output() - fmt.Println("diff: ", op) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(len(op)).To(gomega.BeZero()) + if windowsEnv { + cmdTestData := []string{"exec", pod.Name, "--namespace=" + namespace, "powershell.exe", "((Get-FileHash '/mnt/volume1/testdata2.txt' -Algorithm SHA256).Hash -eq (Get-FileHash '/mnt/volume1/testdata2_pod.txt' -Algorithm SHA256).Hash)"} + diffNotFound := strings.TrimSpace(e2ekubectl.RunKubectlOrDie(namespace, cmdTestData...)) + gomega.Expect(diffNotFound).To(gomega.Equal("True")) + } else { + op, err = exec.Command("diff", testdataFile, testdataFile+"_pod").Output() + fmt.Println("diff: ", op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(op)).To(gomega.BeZero()) + } ginkgo.By("File system resize finished successfully in GC") ginkgo.By("Checking for PVC resize completion on SVC PVC") @@ -387,10 +432,15 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Verify the volume is accessible and filesystem type is as expected") cmd[1] = pod.Name lastOutput := e2ekubectl.RunKubectlOrDie(namespace, cmd...) 
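The data-consistency check above also diverges by OS: Linux copies the file back out and runs a local diff, while Windows keeps both copies inside the pod and compares SHA256 hashes with Get-FileHash, avoiding a 512 MB kubectl cp. A sketch, assuming the hard-coded test-file paths used above, of how the two branches could be folded into one helper (hypothetical name):

```go
// verifyTestDataIntact asserts that the data written before the resize is
// unchanged afterwards, using an in-pod hash compare on Windows and a local
// byte-for-byte diff on Linux.
func verifyTestDataIntact(namespace string, pod *v1.Pod, localFile string) {
	if windowsEnv {
		ps := "((Get-FileHash '/mnt/volume1/testdata2.txt' -Algorithm SHA256).Hash -eq " +
			"(Get-FileHash '/mnt/volume1/testdata2_pod.txt' -Algorithm SHA256).Hash)"
		cmd := []string{"exec", pod.Name, "--namespace=" + namespace, "powershell.exe", ps}
		match := strings.TrimSpace(e2ekubectl.RunKubectlOrDie(namespace, cmd...))
		gomega.Expect(match).To(gomega.Equal("True"))
		return
	}
	_ = e2ekubectl.RunKubectlOrDie(namespace, "cp",
		fmt.Sprintf("%v/%v:/mnt/volume1/testdata", namespace, pod.Name), localFile+"_pod")
	out, err := exec.Command("diff", localFile, localFile+"_pod").Output()
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
	gomega.Expect(len(out)).To(gomega.BeZero(), "testdata differs after resize")
}
```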
- gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + if windowsEnv { + gomega.Expect(strings.Contains(lastOutput, ntfsFSType)).NotTo(gomega.BeFalse()) + } else { + gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + } ginkgo.By("Check filesystem size for mount point /mnt/volume1 before expansion") - originalFsSize, err := getFSSizeMb(f, pod) + // originalFsSize, err := getFSSizeMb(f, pod) + originalFsSize, err := getFileSystemSizeForOsType(f, client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Delete POD. @@ -499,7 +549,11 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Verify after expansion the filesystem type is as expected") cmd[1] = pod.Name lastOutput = e2ekubectl.RunKubectlOrDie(namespace, cmd...) - gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + if windowsEnv { + gomega.Expect(strings.Contains(lastOutput, ntfsFSType)).NotTo(gomega.BeFalse()) + } else { + gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + } ginkgo.By("Waiting for file system resize to finish") pvclaim, err = waitForFSResize(pvclaim, client) @@ -509,7 +563,8 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { expectEqual(len(pvcConditions), 0, "pvc should not have conditions") ginkgo.By("Verify filesystem size for mount point /mnt/volume1 after expansion") - fsSize, err := getFSSizeMb(f, pod) + // fsSize, err := getFSSizeMb(f, pod) + fsSize, err := getFileSystemSizeForOsType(f, client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Filesystem size may be smaller than the size of the block volume. // Here since filesystem was already formatted on the original volume, @@ -913,10 +968,15 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Verify the volume is accessible and filesystem type is as expected") cmd[1] = pod.Name lastOutput := e2ekubectl.RunKubectlOrDie(namespace, cmd...) - gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + if windowsEnv { + gomega.Expect(strings.Contains(lastOutput, ntfsFSType)).NotTo(gomega.BeFalse()) + } else { + gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + } ginkgo.By("Check filesystem size for mount point /mnt/volume1 before expansion") - originalFsSize, err := getFSSizeMb(f, pod) + // originalFsSize, err := getFSSizeMb(f, pod) + originalFsSize, err := getFileSystemSizeForOsType(f, client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Expanding current pvc") @@ -998,7 +1058,11 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Verify after expansion the filesystem type is as expected") cmd[1] = pod.Name lastOutput = e2ekubectl.RunKubectlOrDie(namespace, cmd...) 
- gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + if windowsEnv { + gomega.Expect(strings.Contains(lastOutput, ntfsFSType)).NotTo(gomega.BeFalse()) + } else { + gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + } ginkgo.By("Waiting for file system resize to finish") pvclaim, err = waitForFSResize(pvclaim, client) @@ -1008,7 +1072,8 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { expectEqual(len(pvcConditions), 0, "pvc should not have conditions") ginkgo.By("Verify filesystem size for mount point /mnt/volume1 after expansion") - fsSize, err := getFSSizeMb(f, pod) + // fsSize, err := getFSSizeMb(f, pod) + fsSize, err := getFileSystemSizeForOsType(f, client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Filesystem size may be smaller than the size of the block volume. // Here since filesystem was already formatted on the original volume, @@ -1154,7 +1219,11 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Creating Storage Class and PVC with allowVolumeExpansion = true") scParameters := make(map[string]string) - scParameters[scParamFsType] = ext4FSType + if windowsEnv { + scParameters[scParamFsType] = ntfsFSType + } else { + scParameters[scParamFsType] = ext4FSType + } scParameters[svStorageClassName] = thickProvPolicy sc, pvc, err := createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, "", nil, "", true, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1193,10 +1262,15 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Verify the volume is accessible and filesystem type is as expected") cmd[1] = pod.Name lastOutput := e2ekubectl.RunKubectlOrDie(namespace, cmd...) - gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + if windowsEnv { + gomega.Expect(strings.Contains(lastOutput, ntfsFSType)).NotTo(gomega.BeFalse()) + } else { + gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + } ginkgo.By("Check filesystem size for mount point /mnt/volume1 before expansion") - originalFsSize, err := getFSSizeMb(f, pod) + // originalFsSize, err := getFSSizeMb(f, pod) + originalFsSize, err := getFileSystemSizeForOsType(f, client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Delete POD. @@ -1241,9 +1315,9 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { pvc, err = checkPvcHasGivenStatusCondition(client, namespace, pvc.Name, true, v1.PersistentVolumeClaimResizing) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Checking for 'Resizing' status condition on SVC PVC") - _, err = checkSvcPvcHasGivenStatusCondition(svcPvcName, true, v1.PersistentVolumeClaimResizing) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // ginkgo.By("Checking for 'Resizing' status condition on SVC PVC") + // _, err = checkSvcPvcHasGivenStatusCondition(svcPvcName, true, v1.PersistentVolumeClaimResizing) + // gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Bringing GC CSI controller down...") isGCCSIDeploymentPODdown = true @@ -1299,7 +1373,11 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Verify after expansion the filesystem type is as expected") cmd[1] = pod.Name lastOutput = e2ekubectl.RunKubectlOrDie(namespace, cmd...) 
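The same five-line fsType branch now appears in every storage-class setup touched by this diff. A one-line helper would keep the call sites flat; sketch only, the name is hypothetical:

```go
// defaultFsType returns the filesystem type to request from the CSI driver for
// the current worker OS, so call sites can write:
//   scParameters[scParamFsType] = defaultFsType()
func defaultFsType() string {
	if windowsEnv {
		return ntfsFSType
	}
	return ext4FSType
}
```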
- gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + if windowsEnv { + gomega.Expect(strings.Contains(lastOutput, ntfsFSType)).NotTo(gomega.BeFalse()) + } else { + gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + } ginkgo.By("Waiting for file system resize to finish") pvc, err = waitForFSResize(pvc, client) @@ -1309,7 +1387,8 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { expectEqual(len(pvcConditions), 0, "pvc should not have conditions") ginkgo.By("Verify filesystem size for mount point /mnt/volume1 after expansion") - fsSize, err := getFSSizeMb(f, pod) + // fsSize, err := getFSSizeMb(f, pod) + fsSize, err := getFileSystemSizeForOsType(f, client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Filesystem size may be smaller than the size of the block volume. // Here since filesystem was already formatted on the original volume, @@ -1353,7 +1432,11 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Creating Storage Class and PVC with allowVolumeExpansion = true") setResourceQuota(client, namespace, rqLimit) scParameters := make(map[string]string) - scParameters[scParamFsType] = ext4FSType + if windowsEnv { + scParameters[scParamFsType] = ntfsFSType + } else { + scParameters[scParamFsType] = ext4FSType + } scParameters[svStorageClassName] = thickProvPolicy sc, pvc, err := createPVCAndStorageClass(ctx, client, namespace, nil, scParameters, "", nil, "", true, "") gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1443,37 +1526,60 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Verify the volume is accessible and filesystem type is as expected") cmd[1] = pod.Name lastOutput := e2ekubectl.RunKubectlOrDie(namespace, cmd...) 
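One thing to double-check in the @@ -1443 hunk that continues below: the local dd staging is wrapped in if windowsEnv, yet the Linux else-branch that follows still copies testdataFile into the pod, and the equivalent block in verifyOnlineVolumeExpansionOnGc (tkgs_ha_utils.go, further down in this diff) gates the dd setup on if !windowsEnv. The intended shape is presumably the negated gate, roughly:

```go
// Local 512 MB test file is only needed on Linux, where it is later pushed
// into the pod with kubectl cp; Windows generates the data in-pod instead.
if !windowsEnv {
	testdataFile = fmt.Sprintf("/tmp/testdata_%v_%v", time.Now().Unix(), rand.Intn(1000))
	op, err = exec.Command("dd", "if=/dev/urandom", fmt.Sprintf("of=%v", testdataFile),
		"bs=64k", "count=8000").Output()
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
	defer func() {
		op, err = exec.Command("rm", "-f", testdataFile).Output()
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
	}()
}
```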
- gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) - - rand.New(rand.NewSource(time.Now().Unix())) - testdataFile := fmt.Sprintf("/tmp/testdata_%v_%v", time.Now().Unix(), rand.Intn(1000)) - ginkgo.By(fmt.Sprintf("Creating a 512mb test data file %v", testdataFile)) - op, err := exec.Command("dd", "if=/dev/urandom", fmt.Sprintf("of=%v", testdataFile), - "bs=64k", "count=8000").Output() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - op, err = exec.Command("rm", "-f", testdataFile).Output() + if windowsEnv { + gomega.Expect(strings.Contains(lastOutput, ntfsFSType)).NotTo(gomega.BeFalse()) + } else { + gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + } + var testdataFile string + var op []byte + if windowsEnv { + rand.New(rand.NewSource(time.Now().Unix())) + testdataFile = fmt.Sprintf("/tmp/testdata_%v_%v", time.Now().Unix(), rand.Intn(1000)) + ginkgo.By(fmt.Sprintf("Creating a 512mb test data file %v", testdataFile)) + op, err = exec.Command("dd", "if=/dev/urandom", fmt.Sprintf("of=%v", testdataFile), + "bs=64k", "count=8000").Output() gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() + defer func() { + op, err = exec.Command("rm", "-f", testdataFile).Output() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + } - _ = e2ekubectl.RunKubectlOrDie(namespace, "cp", testdataFile, - fmt.Sprintf("%v/%v:/mnt/volume1/testdata", namespace, pod.Name)) + if windowsEnv { + cmdTestData := []string{"exec", pod.Name, "--namespace=" + namespace, "powershell.exe", "$out = New-Object byte[] 536870912; (New-Object Random).NextBytes($out); [System.IO.File]::WriteAllBytes('/mnt/volume1/testdata2.txt', $out)"} + _ = e2ekubectl.RunKubectlOrDie(namespace, cmdTestData...) + } else { + _ = e2ekubectl.RunKubectlOrDie(namespace, "cp", testdataFile, + fmt.Sprintf("%v/%v:/mnt/volume1/testdata", namespace, pod.Name)) + } onlineVolumeResizeCheck(f, client, namespace, svcPVCName, volHandle, pvclaim, pod) ginkgo.By("Checking data consistency after PVC resize") - _ = e2ekubectl.RunKubectlOrDie(namespace, "cp", - fmt.Sprintf("%v/%v:/mnt/volume1/testdata", namespace, pod.Name), testdataFile+"_pod") + if windowsEnv { + cmdTestData := []string{"exec", pod.Name, "--namespace=" + namespace, "powershell.exe", "Copy-Item -Path '/mnt/volume1/testdata2.txt' -Destination '/mnt/volume1/testdata2_pod.txt'"} + _ = e2ekubectl.RunKubectlOrDie(namespace, cmdTestData...) 
+ } else { + _ = e2ekubectl.RunKubectlOrDie(namespace, "cp", + fmt.Sprintf("%v/%v:/mnt/volume1/testdata", namespace, pod.Name), testdataFile+"_pod") + } defer func() { op, err = exec.Command("rm", "-f", testdataFile+"_pod").Output() fmt.Println("rm: ", op) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() ginkgo.By("Running diff...") - op, err = exec.Command("diff", testdataFile, testdataFile+"_pod").Output() - fmt.Println("diff: ", op) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(len(op)).To(gomega.BeZero()) + if windowsEnv { + cmdTestData := []string{"exec", pod.Name, "--namespace=" + namespace, "powershell.exe", "((Get-FileHash '/mnt/volume1/testdata2.txt' -Algorithm SHA256).Hash -eq (Get-FileHash '/mnt/volume1/testdata2_pod.txt' -Algorithm SHA256).Hash)"} + diffNotFound := strings.TrimSpace(e2ekubectl.RunKubectlOrDie(namespace, cmdTestData...)) + gomega.Expect(diffNotFound).To(gomega.Equal("True")) + } else { + op, err = exec.Command("diff", testdataFile, testdataFile+"_pod").Output() + fmt.Println("diff: ", op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(op)).To(gomega.BeZero()) + } ginkgo.By("File system resize finished successfully in GC") ginkgo.By("Checking for PVC resize completion on SVC PVC") @@ -1524,10 +1630,15 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Verify the volume is accessible and filesystem type is as expected") cmd[1] = pod.Name lastOutput := e2ekubectl.RunKubectlOrDie(namespace, cmd...) - gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + if windowsEnv { + gomega.Expect(strings.Contains(lastOutput, ntfsFSType)).NotTo(gomega.BeFalse()) + } else { + gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + } ginkgo.By("Check filesystem size for mount point /mnt/volume1 before expansion") - originalFsSize, err := getFSSizeMb(f, pod) + // originalFsSize, err := getFSSizeMb(f, pod) + originalFsSize, err := getFileSystemSizeForOsType(f, client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Expanding current pvc") @@ -1575,7 +1686,8 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { expectEqual(len(pvcConditions), 0, "pvc should not have conditions") ginkgo.By("Verify filesystem size for mount point /mnt/volume1") - fsSize, err := getFSSizeMb(f, pod) + // fsSize, err := getFSSizeMb(f, pod) + fsSize, err := getFileSystemSizeForOsType(f, client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("FileSystemSize after PVC resize %d mb , FileSystemSize Before PVC resize %d mb ", fsSize, originalFsSize) @@ -1640,7 +1752,8 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { // Fetch original FileSystemSize. 
ginkgo.By("Verify filesystem size for mount point /mnt/volume1 before expansion") - originalSizeInMb, err = getFSSizeMb(f, pod) + // originalSizeInMb, err = getFSSizeMb(f, pod) + originalSizeInMb, err = getFileSystemSizeForOsType(f, client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Set quota in SVC for 5Gi on policy(SC) - " + storagePolicyName) @@ -1705,7 +1818,8 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Verify filesystem size for mount point /mnt/volume1") - fsSize, err = getFSSizeMb(f, pod) + // fsSize, err = getFSSizeMb(f, pod) + fsSize, err = getFileSystemSizeForOsType(f, client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Filesystem size may be smaller than the size of the block volume // so here we are checking if the new filesystem size is greater than @@ -2048,10 +2162,15 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Verify the volume is accessible and filesystem type is as expected") cmd[1] = pod.Name lastOutput := e2ekubectl.RunKubectlOrDie(namespace, cmd...) - gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + if windowsEnv { + gomega.Expect(strings.Contains(lastOutput, ntfsFSType)).NotTo(gomega.BeFalse()) + } else { + gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + } ginkgo.By("Check filesystem size for mount point /mnt/volume1 before expansion") - originalFsSize, err := getFSSizeMb(f, pod) + // originalFsSize, err := getFSSizeMb(f, pod) + originalFsSize, err := getFileSystemSizeForOsType(f, client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -2136,7 +2255,11 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Verify after expansion the filesystem type is as expected") cmd[1] = pod.Name lastOutput = e2ekubectl.RunKubectlOrDie(namespace, cmd...) - gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + if windowsEnv { + gomega.Expect(strings.Contains(lastOutput, ntfsFSType)).NotTo(gomega.BeFalse()) + } else { + gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + } ginkgo.By("Waiting for file system resize to finish") pvclaim, err = waitForFSResize(pvclaim, client) @@ -2146,7 +2269,8 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { expectEqual(len(pvcConditions), 0, "pvc should not have conditions") ginkgo.By("Verify filesystem size for mount point /mnt/volume1 after expansion") - fsSize, err := getFSSizeMb(f, pod) + // fsSize, err := getFSSizeMb(f, pod) + fsSize, err := getFileSystemSizeForOsType(f, client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Filesystem size may be smaller than the size of the block volume. // Here since filesystem was already formatted on the original volume, @@ -2202,10 +2326,15 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Verify the volume is accessible and filesystem type is as expected") cmd[1] = pod.Name lastOutput := e2ekubectl.RunKubectlOrDie(namespace, cmd...) 
- gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + if windowsEnv { + gomega.Expect(strings.Contains(lastOutput, ntfsFSType)).NotTo(gomega.BeFalse()) + } else { + gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + } ginkgo.By("Check filesystem size for mount point /mnt/volume1 before expansion") - originalFsSize, err := getFSSizeMb(f, pod) + // originalFsSize, err := getFSSizeMb(f, pod) + originalFsSize, err := getFileSystemSizeForOsType(f, client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { @@ -2280,7 +2409,11 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { ginkgo.By("Verify after expansion the filesystem type is as expected") cmd[1] = pod.Name lastOutput = e2ekubectl.RunKubectlOrDie(namespace, cmd...) - gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + if windowsEnv { + gomega.Expect(strings.Contains(lastOutput, ntfsFSType)).NotTo(gomega.BeFalse()) + } else { + gomega.Expect(strings.Contains(lastOutput, ext4FSType)).NotTo(gomega.BeFalse()) + } ginkgo.By("Waiting for file system resize to finish") pvclaim, err = waitForFSResize(pvclaim, client) @@ -2290,7 +2423,8 @@ var _ = ginkgo.Describe("[csi-guest] Volume Expansion Test", func() { expectEqual(len(pvcConditions), 0, "pvc should not have conditions") ginkgo.By("Verify filesystem size for mount point /mnt/volume1 after expansion") - fsSize, err := getFSSizeMb(f, pod) + // fsSize, err := getFSSizeMb(f, pod) + fsSize, err := getFileSystemSizeForOsType(f, client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Filesystem size may be smaller than the size of the block volume. // Here since filesystem was already formatted on the original volume, @@ -2485,7 +2619,8 @@ func onlineVolumeResizeCheck(f *framework.Framework, client clientset.Interface, var err error // Fetch original FileSystemSize. ginkgo.By("Verify filesystem size for mount point /mnt/volume1 before expansion") - originalSizeInMb, err = getFSSizeMb(f, pod) + // originalSizeInMb, err = getFSSizeMb(f, pod) + originalSizeInMb, err = getFileSystemSizeForOsType(f, client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Resize PVC. 
@@ -2520,7 +2655,8 @@ func onlineVolumeResizeCheck(f *framework.Framework, client clientset.Interface, var fsSize int64 ginkgo.By("Verify filesystem size for mount point /mnt/volume1") - fsSize, err = getFSSizeMb(f, pod) + // fsSize, err = getFSSizeMb(f, pod) + fsSize, err = getFileSystemSizeForOsType(f, client, pod) gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.Logf("File system size after expansion : %s", fsSize) // Filesystem size may be smaller than the size of the block volume diff --git a/tests/e2e/gc_cns_nodevm_attachment.go b/tests/e2e/gc_cns_nodevm_attachment.go index 3ff8031503..b974b15575 100644 --- a/tests/e2e/gc_cns_nodevm_attachment.go +++ b/tests/e2e/gc_cns_nodevm_attachment.go @@ -314,7 +314,11 @@ var _ = ginkgo.Describe("[csi-guest] CnsNodeVmAttachment persistence", func() { ginkgo.By("Create a Pod with PVC created in previous step mounted as a volume") pod := fpod.MakePod(namespace, nil, []*v1.PersistentVolumeClaim{pvc}, false, "") - pod.Spec.Containers[0].Image = busyBoxImageOnGcr + if windowsEnv { + pod.Spec.Containers[0].Image = windowsImageOnMcr + } else { + pod.Spec.Containers[0].Image = busyBoxImageOnGcr + } pod, err = client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -409,7 +413,9 @@ var _ = ginkgo.Describe("[csi-guest] CnsNodeVmAttachment persistence", func() { // Waiting for pods status to be Ready. ginkgo.By("Wait for all Pods are Running state") fss.WaitForStatusReadyReplicas(ctx, client, statefulset, replicas) - gomega.Expect(fss.CheckMount(ctx, client, statefulset, mountPath)).NotTo(gomega.HaveOccurred()) + if !windowsEnv { + gomega.Expect(fss.CheckMount(ctx, client, statefulset, mountPath)).NotTo(gomega.HaveOccurred()) + } ssPodsBeforeScaleDown := fss.GetPodList(ctx, client, statefulset) gomega.Expect(ssPodsBeforeScaleDown.Items).NotTo(gomega.BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) diff --git a/tests/e2e/gc_metadata_syncer.go b/tests/e2e/gc_metadata_syncer.go index 4e511193ab..d8b620e0a5 100644 --- a/tests/e2e/gc_metadata_syncer.go +++ b/tests/e2e/gc_metadata_syncer.go @@ -42,17 +42,17 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() { f := framework.NewDefaultFramework("e2e-guest-cluster-cnsvolumemetadata") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged var ( - client clientset.Interface - namespace string - svNamespace string - scParameters map[string]string - storagePolicyName string - svcPVCName string // PVC Name in the Supervisor Cluster - labelKey string - labelValue string - gcClusterID string - pvcUID string - manifestPath = "tests/e2e/testing-manifests/statefulset/nginx" + client clientset.Interface + namespace string + svNamespace string + scParameters map[string]string + storagePolicyName string + svcPVCName string // PVC Name in the Supervisor Cluster + labelKey string + labelValue string + gcClusterID string + pvcUID string + // manifestPath = "tests/e2e/testing-manifests/statefulset/nginx" pvclabelKey string pvclabelValue string pvlabelKey string @@ -403,8 +403,12 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() - ginkgo.By("Creating statefulset") - statefulset := fss.CreateStatefulSet(ctx, client, manifestPath, namespace) + // ginkgo.By("Creating statefulset") + // statefulset := fss.CreateStatefulSet(ctx, client, manifestPath, namespace) + statefulset := 
GetStatefulSetFromManifest(namespace) + + ginkgo.By("Create a statefulset with 3 replicas") + CreateStatefulSet(namespace, statefulset, client) defer func() { ginkgo.By(fmt.Sprintf("Deleting all statefulsets in namespace: %v", namespace)) fss.DeleteAllStatefulSets(ctx, client, namespace) @@ -417,7 +421,9 @@ var _ = ginkgo.Describe("[csi-guest] pvCSI metadata syncer tests", func() { replicas := *(statefulset.Spec.Replicas) // Waiting for pods status to be Ready. fss.WaitForStatusReadyReplicas(ctx, client, statefulset, replicas) - gomega.Expect(fss.CheckMount(ctx, client, statefulset, mountPath)).NotTo(gomega.HaveOccurred()) + if !windowsEnv { + gomega.Expect(fss.CheckMount(ctx, client, statefulset, mountPath)).NotTo(gomega.HaveOccurred()) + } ssPodsBeforeScaleup := fss.GetPodList(ctx, client, statefulset) gomega.Expect(ssPodsBeforeScaleup.Items).NotTo(gomega.BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) diff --git a/tests/e2e/improved_csi_idempotency.go b/tests/e2e/improved_csi_idempotency.go index 396e708794..cd4c2bdced 100644 --- a/tests/e2e/improved_csi_idempotency.go +++ b/tests/e2e/improved_csi_idempotency.go @@ -678,7 +678,11 @@ func extendVolumeWithServiceDown(serviceName string, namespace string, client cl } createResourceQuota(client, namespace, rqLimit, thickProvPolicy) scParameters[svStorageClassName] = thickProvPolicy - scParameters[scParamFsType] = ext4FSType + if windowsEnv { + scParameters[scParamFsType] = ntfsFSType + } else { + scParameters[scParamFsType] = ext4FSType + } storageclass, err = client.StorageV1().StorageClasses().Get(ctx, thickProvPolicy, metav1.GetOptions{}) if !apierrors.IsNotFound(err) { gomega.Expect(err).NotTo(gomega.HaveOccurred()) diff --git a/tests/e2e/tkgs_ha_utils.go b/tests/e2e/tkgs_ha_utils.go index 82eaf7daa2..01fc61bc3e 100644 --- a/tests/e2e/tkgs_ha_utils.go +++ b/tests/e2e/tkgs_ha_utils.go @@ -244,35 +244,56 @@ func verifyVolumeProvisioningWithServiceDown(serviceName string, namespace strin // verifyOnlineVolumeExpansionOnGc is a util method which helps in verifying online volume expansion on gc func verifyOnlineVolumeExpansionOnGc(client clientset.Interface, namespace string, svcPVCName string, volHandle string, pvclaim *v1.PersistentVolumeClaim, pod *v1.Pod, f *framework.Framework) { - rand.New(rand.NewSource(time.Now().Unix())) - testdataFile := fmt.Sprintf("/tmp/testdata_%v_%v", time.Now().Unix(), rand.Intn(1000)) - ginkgo.By(fmt.Sprintf("Creating a 512mb test data file %v", testdataFile)) - op, err := exec.Command("dd", "if=/dev/urandom", fmt.Sprintf("of=%v", testdataFile), - "bs=64k", "count=8000").Output() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer func() { - op, err = exec.Command("rm", "-f", testdataFile).Output() + var testdataFile string + var op []byte + var err error + if !windowsEnv { + rand.New(rand.NewSource(time.Now().Unix())) + testdataFile = fmt.Sprintf("/tmp/testdata_%v_%v", time.Now().Unix(), rand.Intn(1000)) + ginkgo.By(fmt.Sprintf("Creating a 512mb test data file %v", testdataFile)) + op, err = exec.Command("dd", "if=/dev/urandom", fmt.Sprintf("of=%v", testdataFile), + "bs=64k", "count=8000").Output() gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() + defer func() { + op, err = exec.Command("rm", "-f", testdataFile).Output() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + } - _ = e2ekubectl.RunKubectlOrDie(namespace, "cp", testdataFile, - fmt.Sprintf("%v/%v:/mnt/volume1/testdata", namespace, pod.Name)) + if windowsEnv { + cmdTestData := 
[]string{"exec", pod.Name, "--namespace=" + namespace, "powershell.exe", "$out = New-Object byte[] 536870912; (New-Object Random).NextBytes($out); [System.IO.File]::WriteAllBytes('/mnt/volume1/testdata2.txt', $out)"} + _ = e2ekubectl.RunKubectlOrDie(namespace, cmdTestData...) + } else { + _ = e2ekubectl.RunKubectlOrDie(namespace, "cp", testdataFile, + fmt.Sprintf("%v/%v:/mnt/volume1/testdata", namespace, pod.Name)) + } onlineVolumeResizeCheck(f, client, namespace, svcPVCName, volHandle, pvclaim, pod) ginkgo.By("Checking data consistency after PVC resize") - _ = e2ekubectl.RunKubectlOrDie(namespace, "cp", - fmt.Sprintf("%v/%v:/mnt/volume1/testdata", namespace, pod.Name), testdataFile+"_pod") - defer func() { - op, err = exec.Command("rm", "-f", testdataFile+"_pod").Output() - fmt.Println("rm: ", op) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() + if windowsEnv { + cmdTestData := []string{"exec", pod.Name, "--namespace=" + namespace, "powershell.exe", "Copy-Item -Path '/mnt/volume1/testdata2.txt' -Destination '/mnt/volume1/testdata2_pod.txt'"} + _ = e2ekubectl.RunKubectlOrDie(namespace, cmdTestData...) + } else { + _ = e2ekubectl.RunKubectlOrDie(namespace, "cp", + fmt.Sprintf("%v/%v:/mnt/volume1/testdata", namespace, pod.Name), testdataFile+"_pod") + defer func() { + op, err = exec.Command("rm", "-f", testdataFile+"_pod").Output() + fmt.Println("rm: ", op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + } ginkgo.By("Running diff...") - op, err = exec.Command("diff", testdataFile, testdataFile+"_pod").Output() - fmt.Println("diff: ", op) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(len(op)).To(gomega.BeZero()) + if windowsEnv { + cmdTestData := []string{"exec", pod.Name, "--namespace=" + namespace, "powershell.exe", "((Get-FileHash '/mnt/volume1/testdata2.txt' -Algorithm SHA256).Hash -eq (Get-FileHash '/mnt/volume1/testdata2_pod.txt' -Algorithm SHA256).Hash)"} + diffNotFound := strings.TrimSpace(e2ekubectl.RunKubectlOrDie(namespace, cmdTestData...)) + gomega.Expect(diffNotFound).To(gomega.Equal("True")) + } else { + op, err = exec.Command("diff", testdataFile, testdataFile+"_pod").Output() + fmt.Println("diff: ", op) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(op)).To(gomega.BeZero()) + } ginkgo.By("File system resize finished successfully in GC") ginkgo.By("Checking for PVC resize completion on SVC PVC") diff --git a/tests/e2e/util.go b/tests/e2e/util.go index 2456b8deb9..6ed8b35c9f 100644 --- a/tests/e2e/util.go +++ b/tests/e2e/util.go @@ -1928,6 +1928,7 @@ func getWCPSessionId(hostname string, username string, password string) string { // getWindowsFileSystemSize finds the windowsWorkerIp and returns the size of the volume func getWindowsFileSystemSize(client clientset.Interface, pod *v1.Pod) (int64, error) { var err error + var output fssh.Result var windowsWorkerIP, size string ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1940,17 +1941,33 @@ func getWindowsFileSystemSize(client clientset.Interface, pod *v1.Pod) (int64, e break } } - nimbusGeneratedWindowsVmPwd := GetAndExpectStringEnvVar(envWindowsPwd) - windowsUser := GetAndExpectStringEnvVar(envWindowsUser) - sshClientConfig := &ssh.ClientConfig{ - User: windowsUser, - Auth: []ssh.AuthMethod{ - ssh.Password(nimbusGeneratedWindowsVmPwd), - }, - HostKeyCallback: ssh.InsecureIgnoreHostKey(), - } cmd := "Get-Disk | Format-List -Property Manufacturer,Size" - output, err := sshExec(sshClientConfig, windowsWorkerIP, cmd) + if guestCluster { + 
svcMasterIp := GetAndExpectStringEnvVar(svcMasterIP) + svcMasterPwd := GetAndExpectStringEnvVar(svcMasterPassword) + svcNamespace = GetAndExpectStringEnvVar(envSupervisorClusterNamespace) + sshWcpConfig := &ssh.ClientConfig{ + User: rootUser, + Auth: []ssh.AuthMethod{ + ssh.Password(svcMasterPwd), + }, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + } + output, err = execCommandOnGcWorker(sshWcpConfig, svcMasterIp, windowsWorkerIP, + svcNamespace, cmd) + } else { + nimbusGeneratedWindowsVmPwd := GetAndExpectStringEnvVar(envWindowsPwd) + windowsUser := GetAndExpectStringEnvVar(envWindowsUser) + sshClientConfig := &ssh.ClientConfig{ + User: windowsUser, + Auth: []ssh.AuthMethod{ + ssh.Password(nimbusGeneratedWindowsVmPwd), + }, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + } + output, err = sshExec(sshClientConfig, windowsWorkerIP, cmd) + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) fullStr := strings.Split(strings.TrimSuffix(string(output.Stdout), "\n"), "\n") var originalSizeInbytes int64 @@ -1962,8 +1979,14 @@ func getWindowsFileSystemSize(client clientset.Interface, pod *v1.Pod) (int64, e if err != nil { return -1, fmt.Errorf("failed to parse size %s into int size", size) } - if originalSizeInbytes < 96636764160 { - break + if guestCluster { + if originalSizeInbytes < 42949672960 { + break + } + } else { + if originalSizeInbytes < 96636764160 { + break + } } } } @@ -7026,3 +7049,46 @@ func removeStoragePolicyQuota(ctx context.Context, restClientConfig *rest.Config framework.Logf("Quota after removing: %s", spq.Spec.Limit) } + +// execCommandOnGcWorker logs into gc worker node using ssh private key and executes command +func execCommandOnGcWorker(sshClientConfig *ssh.ClientConfig, svcMasterIP string, gcWorkerIp string, + svcNamespace string, cmd string) (fssh.Result, error) { + result := fssh.Result{Host: gcWorkerIp, Cmd: cmd} + // get the cluster ssh key + sshSecretName := GetAndExpectStringEnvVar(sshSecretName) + cmdToGetPrivateKey := fmt.Sprintf("kubectl get secret %s -n %s -o"+ + "jsonpath={'.data.ssh-privatekey'} | base64 -d > key", sshSecretName, svcNamespace) + framework.Logf("Invoking command '%v' on host %v", cmdToGetPrivateKey, + svcMasterIP) + cmdResult, err := sshExec(sshClientConfig, svcMasterIP, + cmdToGetPrivateKey) + if err != nil || cmdResult.Code != 0 { + fssh.LogResult(cmdResult) + return result, fmt.Errorf("couldn't execute command: %s on host: %v , error: %s", + cmdToGetPrivateKey, svcMasterIP, err) + } + + enablePermissionCmd := "chmod 600 key" + framework.Logf("Invoking command '%v' on host %v", enablePermissionCmd, + svcMasterIP) + cmdResult, err = sshExec(sshClientConfig, svcMasterIP, + enablePermissionCmd) + if err != nil || cmdResult.Code != 0 { + fssh.LogResult(cmdResult) + return result, fmt.Errorf("couldn't execute command: %s on host: %v , error: %s", + enablePermissionCmd, svcMasterIP, err) + } + + cmdToGetContainerInfo := fmt.Sprintf("ssh -i key %s@%s "+ + "'%s'", gcNodeUser, gcWorkerIp, cmd) + framework.Logf("Invoking command '%v' on host %v", cmdToGetContainerInfo, + svcMasterIP) + cmdResult, err = sshExec(sshClientConfig, svcMasterIP, + cmdToGetContainerInfo) + if err != nil || cmdResult.Code != 0 { + fssh.LogResult(cmdResult) + return result, fmt.Errorf("couldn't execute command: %s on host: %v , error: %s", + cmdToGetContainerInfo, svcMasterIP, err) + } + return cmdResult, nil +} diff --git a/tests/e2e/vm_relocate_test.go b/tests/e2e/vm_relocate_test.go new file mode 100644 index 0000000000..0c6f8d80ac --- /dev/null +++ 
b/tests/e2e/vm_relocate_test.go @@ -0,0 +1,674 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + "fmt" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + + vmopv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" + fnodes "k8s.io/kubernetes/test/e2e/framework/node" + fpv "k8s.io/kubernetes/test/e2e/framework/pv" + admissionapi "k8s.io/pod-security-admission/api" + ctlrclient "sigs.k8s.io/controller-runtime/pkg/client" + + cnsop "sigs.k8s.io/vsphere-csi-driver/v3/pkg/apis/cnsoperator" +) + +var _ bool = ginkgo.Describe("[vm-relocate] vm service vm relocation tests", func() { + + f := framework.NewDefaultFramework("vm-relocate") + f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged + f.SkipNamespaceCreation = true // tests will create their own namespaces + var ( + client clientset.Interface + namespace string + datastoreURL string + storagePolicyName string + storageClassName string + storageProfileId string + vcRestSessionId string + vmi string + vmClass string + vmopC ctlrclient.Client + cnsopC ctlrclient.Client + isVsanHealthServiceStopped bool + isSPSserviceStopped bool + vcAddress string + isServiceStopped bool + // migrationDone bool + // sshClientConfig *ssh.ClientConfig + ) + + ginkgo.BeforeEach(func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + client = f.ClientSet + var err error + topologyFeature := os.Getenv(topologyFeature) + if topologyFeature != topologyTkgHaName { + nodeList, err := fnodes.GetReadySchedulableNodes(ctx, f.ClientSet) + framework.ExpectNoError(err, "Unable to find ready and schedulable Node") + if !(len(nodeList.Items) > 0) { + framework.Failf("Unable to find ready and schedulable Node") + } + storagePolicyName = GetAndExpectStringEnvVar(envStoragePolicyNameForSharedDatastores) + } else { + storagePolicyName = GetAndExpectStringEnvVar(envZonalStoragePolicyName) + } + bootstrap() + isVsanHealthServiceStopped = false + isSPSserviceStopped = false + vcAddress = e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort + vcRestSessionId = createVcSession4RestApis(ctx) + + storageClassName = strings.ReplaceAll(storagePolicyName, "_", "-") // since this is a wcp setup + + datastoreURL = GetAndExpectStringEnvVar(envSharedDatastoreURL) + dsRef := getDsMoRefFromURL(ctx, datastoreURL) + framework.Logf("dsmoId: %v", dsRef.Value) + + storageProfileId = e2eVSphere.GetSpbmPolicyID(storagePolicyName) + contentLibId := createAndOrGetContentlibId4Url(vcRestSessionId, GetAndExpectStringEnvVar(envContentLibraryUrl), + dsRef.Value, GetAndExpectStringEnvVar(envContentLibraryUrlSslThumbprint)) + + framework.Logf("Create a WCP namespace for the test") + vmClass = os.Getenv(envVMClass) + if vmClass == "" { + vmClass = 
vmClassBestEffortSmall + } + namespace = createTestWcpNs( + vcRestSessionId, storageProfileId, vmClass, contentLibId, getSvcId(vcRestSessionId)) + + vmopScheme := runtime.NewScheme() + gomega.Expect(vmopv1.AddToScheme(vmopScheme)).Should(gomega.Succeed()) + vmopC, err = ctlrclient.New(f.ClientConfig(), ctlrclient.Options{Scheme: vmopScheme}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + cnsOpScheme := runtime.NewScheme() + gomega.Expect(cnsop.AddToScheme(cnsOpScheme)).Should(gomega.Succeed()) + cnsopC, err = ctlrclient.New(f.ClientConfig(), ctlrclient.Options{Scheme: cnsOpScheme}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + vmImageName := GetAndExpectStringEnvVar(envVmsvcVmImageName) + framework.Logf("Waiting for virtual machine image list to be available in namespace '%s' for image '%s'", + namespace, vmImageName) + vmi = waitNGetVmiForImageName(ctx, vmopC, namespace, vmImageName) + gomega.Expect(vmi).NotTo(gomega.BeEmpty()) + }) + + ginkgo.AfterEach(func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + if isVsanHealthServiceStopped { + ginkgo.By(fmt.Sprintf("Starting %v on the vCenter host", vsanhealthServiceName)) + startVCServiceWait4VPs(ctx, vcAddress, vsanhealthServiceName, &isVsanHealthServiceStopped) + } + + if isSPSserviceStopped { + ginkgo.By(fmt.Sprintf("Starting %v on the vCenter host", spsServiceName)) + startVCServiceWait4VPs(ctx, vcAddress, vsanhealthServiceName, &isSPSserviceStopped) + } + dumpSvcNsEventsOnTestFailure(client, namespace) + delTestWcpNs(vcRestSessionId, namespace) + gomega.Expect(waitForNamespaceToGetDeleted(ctx, client, namespace, poll, pollTimeout)).To(gomega.Succeed()) + }) + + /* + Basic test + Steps: + 1 Assign a spbm policy to test namespace with sufficient quota + 2 Create a PVC say pvc1 + 3 Create a VMservice VM say vm1, pvc1 + 4 verify pvc1 CNS metadata. 
+ 5 Once the vm1 is up verify that the volume is accessible inside vm1 + 6 Perform ds migration + 7 Verify Vm is still up and volume is accessible inside vm1 + 8 Delete vm1 + 9 delete pvc1 + 10 Remove spbm policy attached to test namespace + */ + ginkgo.It("verify vmservice vm relocation and accessibility", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var pandoraSyncWaitTime int + var err error + if os.Getenv(envPandoraSyncWaitTime) != "" { + pandoraSyncWaitTime, err = strconv.Atoi(os.Getenv(envPandoraSyncWaitTime)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + pandoraSyncWaitTime = defaultPandoraSyncWaitTime + } + datastoreURL = GetAndExpectStringEnvVar(envSharedDatastoreURL) + datastore := getDsMoRefFromURL(ctx, datastoreURL) + ginkgo.By("Creating FCD Disk") + fcdID, err := e2eVSphere.createFCD(ctx, fcdName, diskSizeInMb, datastore.Reference()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow newly created FCD:%s to sync with pandora", + pandoraSyncWaitTime, fcdID)) + time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) + + ginkgo.By(fmt.Sprintf("Creating the PV with the fcdID %s", fcdID)) + staticPVLabels := make(map[string]string) + staticPVLabels["fcd-id"] = fcdID + staticPv := getPersistentVolumeSpec(fcdID, v1.PersistentVolumeReclaimDelete, nil, ext4FSType) + staticPv, err = client.CoreV1().PersistentVolumes().Create(ctx, staticPv, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err = e2eVSphere.waitForCNSVolumeToBeCreated(staticPv.Spec.CSI.VolumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Creating a static PVC") + staticPvc := getPersistentVolumeClaimSpec(namespace, staticPVLabels, staticPv.Name) + staticPvc, err = client.CoreV1().PersistentVolumeClaims(namespace).Create( + ctx, staticPvc, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create a storageclass") + storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Create a PVC") + pvc, err := createPVC(ctx, client, namespace, nil, "", storageclass, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Waiting for all claims to be in bound state") + pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc, staticPvc}, pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pv := pvs[0] + volHandle := pv.Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + defer func() { + ginkgo.By("Delete PVCs") + err = fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fpv.DeletePersistentVolumeClaim(ctx, client, staticPvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Waiting for CNS volumes to be deleted") + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(staticPv.Spec.CSI.VolumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + ginkgo.By("Creating VM bootstrap data") + secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace) + defer func() { + ginkgo.By("Deleting VM bootstrap data") + err := client.CoreV1().Secrets(namespace).Delete(ctx, secretName, *metav1.NewDeleteOptions(0)) + 
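Both the dynamic PVC and the static FCD-backed pair in this test go through existing helpers; for readers unfamiliar with static CNS provisioning, the sketch below shows roughly the shape of the PersistentVolume that getPersistentVolumeSpec builds around the FCD id. The driver name and capacity are assumptions for illustration, not the helper's exact output; the PVC side simply pre-binds via Spec.VolumeName and carries the same fcd-id label.

// Sketch of a statically provisioned PV backed by a pre-created FCD.
// Needs "k8s.io/apimachinery/pkg/api/resource" for resource.MustParse.
staticPVSketch := &v1.PersistentVolume{
	ObjectMeta: metav1.ObjectMeta{
		GenerateName: "static-pv-",
		Labels:       map[string]string{"fcd-id": fcdID},
	},
	Spec: v1.PersistentVolumeSpec{
		Capacity:                      v1.ResourceList{v1.ResourceStorage: resource.MustParse("2Gi")},
		AccessModes:                   []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
		PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
		PersistentVolumeSource: v1.PersistentVolumeSource{
			CSI: &v1.CSIPersistentVolumeSource{
				Driver:       "csi.vsphere.vmware.com", // vSphere CSI driver name
				VolumeHandle: fcdID,                    // the FCD id becomes the CNS volume handle
				FSType:       ext4FSType,
			},
		},
	},
}
_ = staticPVSketch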
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + ginkgo.By("Creating VM") + vm := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc, staticPvc}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating loadbalancing service for ssh with the VM") + vmlbsvc := createService4Vm(ctx, vmopC, namespace, vm.Name) + defer func() { + ginkgo.By("Deleting loadbalancing service for ssh with the VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VM to come up and get an IP") + vmIp, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to the VM") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm, + []*v1.PersistentVolumeClaim{pvc, staticPvc})).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm, err = getVmsvcVM(ctx, vmopC, vm.Namespace, vm.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + vmPath := "/test-vpx-1726143741-210830-wcp.wcp-sanity/vm/Namespaces/" + vm.Namespace + "/" + vm.Name + var volFolder []string + for i, vol := range vm.Status.Volumes { + volFolderCreated := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp) + volFolder = append(volFolder, volFolderCreated) + } + + var wg sync.WaitGroup + errChan := make(chan error, 10) + // Add three tasks to the WaitGroup + wg.Add(2) + + // Start task1 and task2 in separate goroutines + go ioOperationsOnVolumes(&wg, errChan, vmIp, volFolder) + go migrationToAnotherDatastore(&wg, errChan, "nfs1", vmPath) + + // Wait for task1 to complete + go func() { + wg.Wait() // This will wait until task1 and task2 are complete + fmt.Println("Task 1 and Task 2 completed. Starting Task 3 & 4.") + wg.Add(2) // Add task3 to the WaitGroup + // Start task1 and task2 in separate goroutines + go ioOperationsOnVolumes(&wg, errChan, vmIp, volFolder) + go migrationToAnotherDatastore(&wg, errChan, "sharedVmfs-0", vmPath) + }() + // Create a goroutine to close the error channel once all tasks are done + go func() { + wg.Wait() + close(errChan) + }() + + // Collecting errors + var errors []error + for err := range errChan { + if err != nil { + errors = append(errors, err) + } + } + + // Process errors + if len(errors) > 0 { + fmt.Println("Errors occurred:") + for _, err := range errors { + fmt.Println(err) + } + } else { + fmt.Println("All tasks completed successfully.") + } + + }) + + /* + VSAN-HEALTH DOWN SCENARIO + Steps: + 1 Assign a spbm policy to test namespace with sufficient quota + 2 Create a PVC say pvc1 + 3 Create a VMservice VM say vm1, pvc1 + 4 verify pvc1 CNS metadata. 
+ 5 Once the vm1 is up verify that the volume is accessible inside vm1 + 6 Perform VmSvc VM migration + 7 Verify Vm is still up and volume is accessible inside vm1 + 8 Put down VSAN-HEALTH + 9 VmSvc VM migration should fail + 10 Wait for VSAN-HEALTH to come up + 11 Perform VmSvc VM migration + 12 Verify Vm is still up and volume is accessible inside vm1 + 13 Delete vm1 + 14 delete pvc1 + 15 Remove spbm policy attached to test namespace + */ + ginkgo.It("verify vmservice vm relocation when VSAN-HEALTH is down", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var pandoraSyncWaitTime int + var err error + if os.Getenv(envPandoraSyncWaitTime) != "" { + pandoraSyncWaitTime, err = strconv.Atoi(os.Getenv(envPandoraSyncWaitTime)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + pandoraSyncWaitTime = defaultPandoraSyncWaitTime + } + datastoreURL = GetAndExpectStringEnvVar(envSharedDatastoreURL) + datastore := getDsMoRefFromURL(ctx, datastoreURL) + ginkgo.By("Creating FCD Disk") + fcdID, err := e2eVSphere.createFCD(ctx, fcdName, diskSizeInMb, datastore.Reference()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow newly created FCD:%s to sync with pandora", + pandoraSyncWaitTime, fcdID)) + time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) + + ginkgo.By(fmt.Sprintf("Creating the PV with the fcdID %s", fcdID)) + staticPVLabels := make(map[string]string) + staticPVLabels["fcd-id"] = fcdID + staticPv := getPersistentVolumeSpec(fcdID, v1.PersistentVolumeReclaimDelete, nil, ext4FSType) + staticPv, err = client.CoreV1().PersistentVolumes().Create(ctx, staticPv, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err = e2eVSphere.waitForCNSVolumeToBeCreated(staticPv.Spec.CSI.VolumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Creating a static PVC") + staticPvc := getPersistentVolumeClaimSpec(namespace, staticPVLabels, staticPv.Name) + staticPvc, err = client.CoreV1().PersistentVolumeClaims(namespace).Create( + ctx, staticPvc, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create a storageclass") + storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Create a PVC") + pvc, err := createPVC(ctx, client, namespace, nil, "", storageclass, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Waiting for all claims to be in bound state") + pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, []*v1.PersistentVolumeClaim{pvc, staticPvc}, pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pv := pvs[0] + volHandle := pv.Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + defer func() { + ginkgo.By("Delete PVCs") + err = fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fpv.DeletePersistentVolumeClaim(ctx, client, staticPvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Waiting for CNS volumes to be deleted") + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(staticPv.Spec.CSI.VolumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + ginkgo.By("Creating VM bootstrap data") + secretName := 
createBootstrapSecretForVmsvcVms(ctx, client, namespace) + defer func() { + ginkgo.By("Deleting VM bootstrap data") + err := client.CoreV1().Secrets(namespace).Delete(ctx, secretName, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + ginkgo.By("Creating VM") + vm := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc, staticPvc}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating loadbalancing service for ssh with the VM") + vmlbsvc := createService4Vm(ctx, vmopC, namespace, vm.Name) + defer func() { + ginkgo.By("Deleting loadbalancing service for ssh with the VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VM to come up and get an IP") + vmIp, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to the VM") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm, + []*v1.PersistentVolumeClaim{pvc, staticPvc})).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm, err = getVmsvcVM(ctx, vmopC, vm.Namespace, vm.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + vmPath := "/test-vpx-1726143741-210830-wcp.wcp-sanity/vm/Namespaces/" + vm.Namespace + "/" + vm.Name + var volFolder []string + for i, vol := range vm.Status.Volumes { + volFolderCreated := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp) + volFolder = append(volFolder, volFolderCreated) + } + + var wg sync.WaitGroup + errChan := make(chan error, 10) + // Add three tasks to the WaitGroup + wg.Add(2) + + // // Start task1 and task2 in separate goroutines + go ioOperationsOnVolumes(&wg, errChan, vmIp, volFolder) + go migrationToAnotherDatastore(&wg, errChan, "nfs1", vmPath) + + // Stop VSAN-HEALTH service + isServiceStopped = false + ginkgo.By(fmt.Sprintf("Stopping %v on the vCenter host", vsanhealthServiceName)) + vcAddress := e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort + err = invokeVCenterServiceControl(ctx, stopOperation, vsanhealthServiceName, vcAddress) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + isServiceStopped = true + err = waitVCenterServiceToBeInState(ctx, vsanhealthServiceName, vcAddress, svcStoppedMessage) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + defer func() { + if isServiceStopped { + ginkgo.By(fmt.Sprintf("Starting %v on the vCenter host", vsanhealthServiceName)) + err = invokeVCenterServiceControl(ctx, startOperation, vsanhealthServiceName, vcAddress) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = waitVCenterServiceToBeInState(ctx, vsanhealthServiceName, vcAddress, svcRunningMessage) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + isServiceStopped = false + } + }() + + // Wait for task1 to complete + go func() { + wg.Wait() // This will wait until task1 and task2 are complete + fmt.Println("Task 1 and Task 2 completed. 
Starting Task 3 & 4.") + wg.Add(2) // Add task3 to the WaitGroup + // Start task1 and task2 in separate goroutines + go ioOperationsOnVolumes(&wg, errChan, vmIp, volFolder) + go migrationToAnotherDatastore(&wg, errChan, "sharedVmfs-0", vmPath) + }() + // Create a goroutine to close the error channel once all tasks are done + go func() { + wg.Wait() + close(errChan) + }() + + // Collecting errors + var errors []error + for err := range errChan { + if err != nil { + errors = append(errors, err) + } + } + + // Process errors + if len(errors) > 0 { + fmt.Println("Errors occurred:") + for _, err := range errors { + fmt.Println(err) + } + } else { + fmt.Println("All tasks completed successfully.") + } + + }) + + /* + SPS DOWN SCENARIO + Steps: + 1 Assign a spbm policy to test namespace with sufficient quota + 2 Create a PVC say pvc1 + 3 Create a VMservice VM say vm1, pvc1 + 4 verify pvc1 CNS metadata. + 5 Once the vm1 is up verify that the volume is accessible inside vm1 + 6 Perform VmSvc VM migration + 7 Verify Vm is still up and volume is accessible inside vm1 + 8 Put down SPS + 9 VmSvc VM migration should fail + 10 Wait for SPS to come up + 11 Perform VmSvc VM migration + 12 Verify Vm is still up and volume is accessible inside vm1 + 13 Delete vm1 + 14 delete pvc1 + 15 Remove spbm policy attached to test namespace + */ + ginkgo.It("verify vmservice vm relocation when SPS is down", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var pandoraSyncWaitTime int + var err error + if os.Getenv(envPandoraSyncWaitTime) != "" { + pandoraSyncWaitTime, err = strconv.Atoi(os.Getenv(envPandoraSyncWaitTime)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + pandoraSyncWaitTime = defaultPandoraSyncWaitTime + } + datastoreURL = GetAndExpectStringEnvVar(envSharedDatastoreURL) + datastore := getDsMoRefFromURL(ctx, datastoreURL) + ginkgo.By("Creating FCD Disk") + fcdID, err := e2eVSphere.createFCD(ctx, fcdName, diskSizeInMb, datastore.Reference()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By(fmt.Sprintf("Sleeping for %v seconds to allow newly created FCD:%s to sync with pandora", + pandoraSyncWaitTime, fcdID)) + time.Sleep(time.Duration(pandoraSyncWaitTime) * time.Second) + + ginkgo.By(fmt.Sprintf("Creating the PV with the fcdID %s", fcdID)) + staticPVLabels := make(map[string]string) + staticPVLabels["fcd-id"] = fcdID + staticPv := getPersistentVolumeSpec(fcdID, v1.PersistentVolumeReclaimDelete, nil, ext4FSType) + staticPv, err = client.CoreV1().PersistentVolumes().Create(ctx, staticPv, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err = e2eVSphere.waitForCNSVolumeToBeCreated(staticPv.Spec.CSI.VolumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Creating a static PVC") + staticPvc := getPersistentVolumeClaimSpec(namespace, staticPVLabels, staticPv.Name) + staticPvc, err = client.CoreV1().PersistentVolumeClaims(namespace).Create( + ctx, staticPvc, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Create a storageclass") + storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Create a PVC") + pvc, err := createPVC(ctx, client, namespace, nil, "", storageclass, "") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Waiting for all claims to be in bound state") + pvs, err := fpv.WaitForPVClaimBoundPhase(ctx, client, 
[]*v1.PersistentVolumeClaim{pvc, staticPvc}, pollTimeout) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + pv := pvs[0] + volHandle := pv.Spec.CSI.VolumeHandle + gomega.Expect(volHandle).NotTo(gomega.BeEmpty()) + defer func() { + ginkgo.By("Delete PVCs") + err = fpv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fpv.DeletePersistentVolumeClaim(ctx, client, staticPvc.Name, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Waiting for CNS volumes to be deleted") + err = e2eVSphere.waitForCNSVolumeToBeDeleted(volHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = e2eVSphere.waitForCNSVolumeToBeDeleted(staticPv.Spec.CSI.VolumeHandle) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + ginkgo.By("Creating VM bootstrap data") + secretName := createBootstrapSecretForVmsvcVms(ctx, client, namespace) + defer func() { + ginkgo.By("Deleting VM bootstrap data") + err := client.CoreV1().Secrets(namespace).Delete(ctx, secretName, *metav1.NewDeleteOptions(0)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + ginkgo.By("Creating VM") + vm := createVmServiceVmWithPvcs( + ctx, vmopC, namespace, vmClass, []*v1.PersistentVolumeClaim{pvc, staticPvc}, vmi, storageClassName, secretName) + defer func() { + ginkgo.By("Deleting VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachine{ObjectMeta: metav1.ObjectMeta{ + Name: vm.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Creating loadbalancing service for ssh with the VM") + vmlbsvc := createService4Vm(ctx, vmopC, namespace, vm.Name) + defer func() { + ginkgo.By("Deleting loadbalancing service for ssh with the VM") + err = vmopC.Delete(ctx, &vmopv1.VirtualMachineService{ObjectMeta: metav1.ObjectMeta{ + Name: vmlbsvc.Name, + Namespace: namespace, + }}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + + ginkgo.By("Wait for VM to come up and get an IP") + vmIp, err := waitNgetVmsvcVmIp(ctx, vmopC, namespace, vm.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Wait and verify PVCs are attached to the VM") + gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm, + []*v1.PersistentVolumeClaim{pvc, staticPvc})).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Verify PVCs are accessible to the VM") + ginkgo.By("Write some IO to the CSI volumes and read it back from them and verify the data integrity") + vm, err = getVmsvcVM(ctx, vmopC, vm.Namespace, vm.Name) // refresh vm info + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + vmPath := "/test-vpx-1726143741-210830-wcp.wcp-sanity/vm/Namespaces/" + vm.Namespace + "/" + vm.Name + var volFolder []string + for i, vol := range vm.Status.Volumes { + volFolderCreated := formatNVerifyPvcIsAccessible(vol.DiskUuid, i+1, vmIp) + volFolder = append(volFolder, volFolderCreated) + } + + var wg sync.WaitGroup + errChan := make(chan error, 10) + // Add three tasks to the WaitGroup + wg.Add(3) + + // Start task1 and task2 in separate goroutines + go ioOperationsOnVolumes(&wg, errChan, vmIp, volFolder) + go migrationToAnotherDatastore(&wg, errChan, "nfs1", vmPath) + go stopRequiredService(&wg, ctx, spsServiceName) + + // Create a goroutine to close the error channel once all tasks are done + go func() { + wg.Wait() + close(errChan) + }() + + // Collecting errors + var errors []error + for err := range errChan { + if err != nil { + errors = append(errors, err) + } + } + + // Process errors + if len(errors) > 0 { + 
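All three tests in this file drive the IO and the datastore relocation with a single sync.WaitGroup that is re-armed with wg.Add inside a goroutine after wg.Wait has already been called, so the goroutine that closes errChan can fire between the two rounds. A race-free sketch of the same flow, reusing the helper functions this patch adds and one WaitGroup per round (variable names follow the tests; adjust the target datastores as needed):

// Run "IO while relocating" twice, collecting any reported errors.
errChan := make(chan error, 10)
runRound := func(targetDs string) {
	var wg sync.WaitGroup
	wg.Add(2)
	go ioOperationsOnVolumes(&wg, errChan, vmIp, volFolder)
	go migrationToAnotherDatastore(&wg, errChan, targetDs, vmPath)
	wg.Wait() // both tasks of this round are done before the next round starts
}
runRound("nfs1")         // round 1: IO while relocating to nfs1
runRound("sharedVmfs-0") // round 2: IO while relocating back to sharedVmfs-0
close(errChan)           // safe: no goroutine can still send

var migrationErrs []error
for err := range errChan {
	if err != nil {
		migrationErrs = append(migrationErrs, err)
	}
}
gomega.Expect(migrationErrs).To(gomega.BeEmpty(), "unexpected errors during concurrent IO/relocation")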
fmt.Println("Errors occurred:") + for _, err := range errors { + fmt.Println(err) + } + } else { + fmt.Println("All tasks completed successfully.") + } + + }) + +}) diff --git a/tests/e2e/vmservice_utils.go b/tests/e2e/vmservice_utils.go index f198187c8e..0c5be96fc2 100644 --- a/tests/e2e/vmservice_utils.go +++ b/tests/e2e/vmservice_utils.go @@ -25,9 +25,11 @@ import ( "math/rand" "net/http" "os" + "os/exec" "reflect" "strconv" "strings" + "sync" "time" "github.com/onsi/ginkgo/v2" @@ -896,3 +898,71 @@ func performVolumeLifecycleActionForVmServiceVM(ctx context.Context, client clie gomega.Expect(waitNverifyPvcsAreAttachedToVmsvcVm(ctx, vmopC, cnsopC, vm, []*v1.PersistentVolumeClaim{pvc})).NotTo(gomega.HaveOccurred()) } + +// Perform IO operations on vmSvc VM volumes +func ioOperationsOnVolumes(wg *sync.WaitGroup, errChan chan<- error, vmIp string, volFolder []string) { + defer wg.Done() // Notify that this goroutine is done + for i := 0; i < 20; i++ { + framework.Logf("Task ioOperationsOnVolumes - Iteration %s", i) + time.Sleep(10 * time.Second) + // IO operation on volumes + for i := range volFolder { + verifyDataIntegrityOnVmDisk(vmIp, volFolder[i]) + } + } +} + +// performing vmsvc-vm migration +func migrationToAnotherDatastore(wg *sync.WaitGroup, errChan chan<- error, datastoreName string, vm string) { + defer wg.Done() // Notify that this goroutine is done + framework.Logf("Task migrationToAnotherDatastore") + err := migrateVmSvcVmToAnotherDatastore(datastoreName, vm) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Migration of vms failed") +} + +// migrateVmSvcVmToAnotherDatastore method is used to migrate a VM to another datastore +func migrateVmSvcVmToAnotherDatastore(datastoreName string, vm string) error { + UnMountDsOnCluster := govcLoginCmdTestUser() + "govc vm.migrate -ds " + datastoreName + " " + vm + framework.Logf("Migrate vm to another datastore %s - command : %s", datastoreName, UnMountDsOnCluster) + _, err := exec.Command("/bin/sh", "-c", UnMountDsOnCluster).Output() + framework.Logf("Migrate vm to another datastore - after exec command") + if err != nil { + framework.Logf("Error: %v\n", err) + return fmt.Errorf("couldn't execute command: %s, error: %s", + UnMountDsOnCluster, err) + } + return nil +} + +// govc login cmd +func govcLoginCmdTestUser() string { + loginCmd := "export GOVC_INSECURE=1;" + loginCmd += fmt.Sprintf("export GOVC_URL='https://%s:%s@%s:%s';", + "testUser@vsphere.local", e2eVSphere.Config.Global.Password, + e2eVSphere.Config.Global.VCenterHostname, e2eVSphere.Config.Global.VCenterPort) + return loginCmd +} + +func stopRequiredService(wg *sync.WaitGroup, ctx context.Context, serviceName string) bool { + defer wg.Done() // Notify that this goroutine is done + time.Sleep(20 * time.Second) + // Stop SPS service + framework.Logf("Stopping %v on the vCenter host", serviceName) + vcAddress := e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort + err := invokeVCenterServiceControl(ctx, stopOperation, serviceName, vcAddress) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + isServiceStopped := true + err = waitVCenterServiceToBeInState(ctx, serviceName, vcAddress, svcStoppedMessage) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer func() { + if isServiceStopped { + ginkgo.By(fmt.Sprintf("Starting %v on the vCenter host", serviceName)) + err = invokeVCenterServiceControl(ctx, startOperation, serviceName, vcAddress) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = waitVCenterServiceToBeInState(ctx, serviceName, vcAddress, 
svcRunningMessage) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + isServiceStopped = false + } + }() + return isServiceStopped +} diff --git a/tests/e2e/volume_health_test.go b/tests/e2e/volume_health_test.go index caee9a9737..6dcaf90a5e 100644 --- a/tests/e2e/volume_health_test.go +++ b/tests/e2e/volume_health_test.go @@ -1147,7 +1147,9 @@ var _ = ginkgo.Describe("Volume health check", func() { // Waiting for pods status to be Ready. ginkgo.By("Wait for all Pods are Running state") fss.WaitForStatusReadyReplicas(ctx, client, statefulset, replicas) - gomega.Expect(fss.CheckMount(ctx, client, statefulset, mountPath)).NotTo(gomega.HaveOccurred()) + if !windowsEnv { + gomega.Expect(fss.CheckMount(ctx, client, statefulset, mountPath)).NotTo(gomega.HaveOccurred()) + } ssPodsBeforeScaleDown := fss.GetPodList(ctx, client, statefulset) gomega.Expect(ssPodsBeforeScaleDown.Items).NotTo(gomega.BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) diff --git a/tests/e2e/vsphere_volume_expansion.go b/tests/e2e/vsphere_volume_expansion.go index 1840b21113..2f4eb221c1 100644 --- a/tests/e2e/vsphere_volume_expansion.go +++ b/tests/e2e/vsphere_volume_expansion.go @@ -70,6 +70,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { defaultDatastore *object.Datastore isVsanHealthServiceStopped bool isSPSServiceStopped bool + fsType string ) ginkgo.BeforeEach(func() { client = f.ClientSet @@ -112,6 +113,12 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { setResourceQuota(svcClient, svNamespace, rqLimit) } + if windowsEnv { + fsType = ntfsFSType + } else { + fsType = ext4FSType + } + }) ginkgo.AfterEach(func() { @@ -183,7 +190,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ginkgo.It("[csi-block-vanilla] [csi-guest] [csi-block-vanilla-parallelized] [csi-vcp-mig] Verify volume expansion "+ "with initial filesystem before expansion", ginkgo.Label(p0, block, vanilla, tkg, core), func() { - invokeTestForVolumeExpansionWithFilesystem(f, client, namespace, ext4FSType, "", storagePolicyName, profileID) + invokeTestForVolumeExpansionWithFilesystem(f, client, namespace, fsType, "", storagePolicyName, profileID) }) // Test to verify offline volume expansion workflow with xfs filesystem. 
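Note that stopRequiredService above restarts the service from a deferred block, so by the time its boolean result reaches the caller the service is already being brought back up. A hypothetical variant that hands the restart back to the test keeps the outage window under the caller's control; all helper names used below already exist in this suite, only stopServiceWithRestore itself is new:

// stopServiceWithRestore stops the given vCenter service, waits for the
// stopped state, and returns a restore func the test can defer.
func stopServiceWithRestore(ctx context.Context, serviceName string) (func(), error) {
	vcAddress := e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort
	framework.Logf("Stopping %v on the vCenter host", serviceName)
	if err := invokeVCenterServiceControl(ctx, stopOperation, serviceName, vcAddress); err != nil {
		return nil, err
	}
	if err := waitVCenterServiceToBeInState(ctx, serviceName, vcAddress, svcStoppedMessage); err != nil {
		return nil, err
	}
	restore := func() {
		framework.Logf("Starting %v on the vCenter host", serviceName)
		err := invokeVCenterServiceControl(ctx, startOperation, serviceName, vcAddress)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
		err = waitVCenterServiceToBeInState(ctx, serviceName, vcAddress, svcRunningMessage)
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
	}
	return restore, nil
}

A test could then defer restore() right after stopping the service and decide for itself when the outage should end.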
@@ -499,7 +506,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ginkgo.By("Create StorageClass with allowVolumeExpansion set to true, Create PVC") volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC( - ctx, f, client, "", storagePolicyName, namespace, ext4FSType) + ctx, f, client, "", storagePolicyName, namespace, fsType) defer func() { if !supervisorCluster { @@ -575,7 +582,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { defer cancel() volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC( - ctx, f, client, "", storagePolicyName, namespace, ext4FSType) + ctx, f, client, "", storagePolicyName, namespace, fsType) defer func() { if !supervisorCluster { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) @@ -647,7 +654,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { var expectedErrMsg string volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC( - ctx, f, client, "", storagePolicyName, namespace, ext4FSType) + ctx, f, client, "", storagePolicyName, namespace, fsType) defer func() { if !supervisorCluster { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) @@ -783,7 +790,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { // featureEnabled := isFssEnabled(vcAddress, cnsNewSyncFSS) volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC( - ctx, f, client, "", storagePolicyName, namespace, ext4FSType) + ctx, f, client, "", storagePolicyName, namespace, fsType) defer func() { if !supervisorCluster { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) @@ -1046,7 +1053,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ginkgo.By("Create StorageClass on shared VVOL datastore with allowVolumeExpansion set to true, Create PVC") volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC( - ctx, f, client, sharedVVOLdatastoreURL, storagePolicyName, namespace, ext4FSType) + ctx, f, client, sharedVVOLdatastoreURL, storagePolicyName, namespace, fsType) defer func() { if !supervisorCluster { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) @@ -1136,7 +1143,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ginkgo.By("Create StorageClass on shared NFS datastore with allowVolumeExpansion set to true") volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC( - ctx, f, client, sharedNFSdatastoreURL, storagePolicyName, namespace, ext4FSType) + ctx, f, client, sharedNFSdatastoreURL, storagePolicyName, namespace, fsType) defer func() { if !supervisorCluster { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) @@ -1232,7 +1239,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ginkgo.By("Create StorageClass on shared VMFS datastore with allowVolumeExpansion set to true") volHandle, pvclaim, pv, storageclass := createSCwithVolumeExpansionTrueAndDynamicPVC( - ctx, f, client, sharedVMFSdatastoreURL, storagePolicyName, namespace, ext4FSType) + ctx, f, client, sharedVMFSdatastoreURL, storagePolicyName, namespace, fsType) defer func() { if !supervisorCluster { err := client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0)) @@ -1896,7 +1903,7 @@ var _ = 
ginkgo.Describe("Volume Expansion Test", func() { ginkgo.By("Create StorageClass with allowVolumeExpansion set to true, Create PVC") sharedVSANDatastoreURL := GetAndExpectStringEnvVar(envSharedDatastoreURL) volHandle, pvclaim, pv, storageclass = createSCwithVolumeExpansionTrueAndDynamicPVC( - ctx, f, client, sharedVSANDatastoreURL, storagePolicyName, namespace, ext4FSType) + ctx, f, client, sharedVSANDatastoreURL, storagePolicyName, namespace, fsType) defer func() { if !supervisorCluster { @@ -2023,7 +2030,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ginkgo.By("Create StorageClass with allowVolumeExpansion set to true, Create PVC") sharedVSANDatastoreURL := GetAndExpectStringEnvVar(envSharedDatastoreURL) volHandle, pvclaim, pv, storageclass = createSCwithVolumeExpansionTrueAndDynamicPVC( - ctx, f, client, sharedVSANDatastoreURL, storagePolicyName, namespace, ext4FSType) + ctx, f, client, sharedVSANDatastoreURL, storagePolicyName, namespace, fsType) defer func() { if !supervisorCluster { @@ -2448,7 +2455,7 @@ var _ = ginkgo.Describe("Volume Expansion Test", func() { ginkgo.By("Create StorageClass with allowVolumeExpansion set to true, Create PVC") sharedVSANDatastoreURL := GetAndExpectStringEnvVar(envSharedDatastoreURL) volHandle, pvclaim, pv, storageclass = createSCwithVolumeExpansionTrueAndDynamicPVC( - ctx, f, client, sharedVSANDatastoreURL, storagePolicyName, namespace, ext4FSType) + ctx, f, client, sharedVSANDatastoreURL, storagePolicyName, namespace, fsType) defer func() { if !supervisorCluster { @@ -3001,7 +3008,11 @@ func invokeTestForVolumeExpansion(f *framework.Framework, client clientset.Inter defer cancel() ginkgo.By("Invoking Test for Volume Expansion") scParameters := make(map[string]string) - scParameters[scParamFsType] = ext4FSType + if windowsEnv { + scParameters[scParamFsType] = ntfsFSType + } else { + scParameters[scParamFsType] = ext4FSType + } // Create Storage class and PVC ginkgo.By("Creating Storage Class and PVC with allowVolumeExpansion = true") var storageclass *storagev1.StorageClass @@ -3242,7 +3253,11 @@ func invokeTestForVolumeExpansionWithFilesystem(f *framework.Framework, client c defer cancel() ginkgo.By("Invoking Test for Volume Expansion 2") scParameters := make(map[string]string) - scParameters[scParamFsType] = fstype + if windowsEnv { + scParameters[scParamFsType] = ntfsFSType + } else { + scParameters[scParamFsType] = fstype + } // Create Storage class and PVC ginkgo.By(fmt.Sprintf("Creating Storage Class with %s filesystem and PVC with allowVolumeExpansion = true", fstype)) @@ -3478,8 +3493,11 @@ func invokeTestForInvalidVolumeExpansion(f *framework.Framework, client clientse ctx, cancel := context.WithCancel(context.Background()) defer cancel() scParameters := make(map[string]string) - scParameters[scParamFsType] = ext4FSType - + if windowsEnv { + scParameters[scParamFsType] = ntfsFSType + } else { + scParameters[scParamFsType] = ext4FSType + } // Create Storage class and PVC ginkgo.By("Creating Storage Class and PVC with allowVolumeExpansion = false") var storageclass *storagev1.StorageClass @@ -3551,7 +3569,11 @@ func invokeTestForInvalidVolumeShrink(f *framework.Framework, client clientset.I ctx, cancel := context.WithCancel(context.Background()) defer cancel() scParameters := make(map[string]string) - scParameters[scParamFsType] = ext4FSType + if windowsEnv { + scParameters[scParamFsType] = ntfsFSType + } else { + scParameters[scParamFsType] = ext4FSType + } // Create Storage class and PVC ginkgo.By("Creating Storage Class and 
PVC with allowVolumeExpansion = true") @@ -3664,7 +3686,11 @@ func invokeTestForInvalidVolumeExpansionStaticProvision(f *framework.Framework, ) scParameters := make(map[string]string) - scParameters[scParamFsType] = ext4FSType + if windowsEnv { + scParameters[scParamFsType] = ntfsFSType + } else { + scParameters[scParamFsType] = ext4FSType + } // Set up FCD if os.Getenv(envPandoraSyncWaitTime) != "" { @@ -3779,7 +3805,11 @@ func invokeTestForExpandVolumeMultipleTimes(f *framework.Framework, client clien defer cancel() ginkgo.By("Invoking Test to verify Multiple Volume Expansions on the same volume") scParameters := make(map[string]string) - scParameters[scParamFsType] = ext4FSType + if windowsEnv { + scParameters[scParamFsType] = ntfsFSType + } else { + scParameters[scParamFsType] = ext4FSType + } // Create Storage class and PVC ginkgo.By("Creating Storage Class and PVC with allowVolumeExpansion = true") var storageclass *storagev1.StorageClass
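The hunks above repeat the same windowsEnv branch every time scParameters is populated; if this pattern keeps spreading, a small helper could centralize the choice. A sketch under that assumption (only the function name is new; windowsEnv, scParamFsType, ntfsFSType and ext4FSType are existing identifiers in this suite):

// fsTypeSCParameters returns storage class parameters with the fstype forced
// to NTFS on Windows test beds, falling back to the requested (or ext4)
// filesystem elsewhere. Hypothetical helper, not part of this patch.
func fsTypeSCParameters(requested string) map[string]string {
	scParameters := make(map[string]string)
	switch {
	case windowsEnv:
		scParameters[scParamFsType] = ntfsFSType
	case requested != "":
		scParameters[scParamFsType] = requested
	default:
		scParameters[scParamFsType] = ext4FSType
	}
	return scParameters
}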