From 9b24eb558d6c49334b80b64ef970350b81f33798 Mon Sep 17 00:00:00 2001 From: Ivan Shvedunov Date: Wed, 12 Sep 2018 16:58:17 +0300 Subject: [PATCH 1/3] Add e2e test for block PVs --- .circleci/config.yml | 3 + tests/e2e/block_pv_test.go | 93 ++++++++++ tests/e2e/ceph_test.go | 155 ++-------------- tests/e2e/common.go | 155 ++++++++++++++++ tests/e2e/e2e_test.go | 2 - tests/e2e/framework/pod_interface.go | 8 +- tests/e2e/framework/pvc_interface.go | 264 +++++++++++++++++++++++++++ tests/e2e/framework/vm_interface.go | 28 ++- tests/e2e/volume_mount_test.go | 50 +---- 9 files changed, 564 insertions(+), 194 deletions(-) create mode 100644 tests/e2e/block_pv_test.go create mode 100644 tests/e2e/framework/pvc_interface.go diff --git a/.circleci/config.yml b/.circleci/config.yml index a4d2b16e2..71742f5cd 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -175,6 +175,9 @@ e2e: &e2e NO_VM_CONSOLE=1 \ INJECT_LOCAL_IMAGE=1 \ VIRTLET_DEMO_BRANCH=master \ + ENABLE_CEPH=1 \ + FEATURE_GATES="BlockVolume=true" \ + KUBELET_FEATURE_GATES="BlockVolume=true" \ BASE_LOCATION="$PWD" \ deploy/demo.sh - run: diff --git a/tests/e2e/block_pv_test.go b/tests/e2e/block_pv_test.go new file mode 100644 index 000000000..22a3f1abc --- /dev/null +++ b/tests/e2e/block_pv_test.go @@ -0,0 +1,93 @@ +/* +Copyright 2018 Mirantis + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "github.com/Mirantis/virtlet/tests/e2e/framework" + . "github.com/Mirantis/virtlet/tests/e2e/ginkgo-ext" +) + +var _ = Describe("Block PVs", func() { + var ( + vm *framework.VMInterface + ssh framework.Executor + ) + + Context("[Local]", func() { + var ( + virtletNodeName string + devPath string + ) + + withLoopbackBlockDevice(&virtletNodeName, &devPath) + + AfterEach(func() { + if ssh != nil { + ssh.Close() + } + if vm != nil { + deleteVM(vm) + } + }) + + It("Should be accessible from within the VM", func() { + vm = makeVMWithMountAndSymlinkScript(virtletNodeName, []framework.PVCSpec{ + { + Name: "block-pv", + Size: "10M", + NodeName: virtletNodeName, + Block: true, + LocalPath: devPath, + ContainerPath: "/dev/testpvc", + }, + }, nil) + ssh = waitSSH(vm) + expectToBeUsableForFilesystem(ssh, "/dev/testpvc") + }) + }) + + Context("[Ceph RBD]", func() { + var monitorIP string + withCeph(&monitorIP, nil, "ceph-admin") + + AfterEach(func() { + if ssh != nil { + ssh.Close() + } + if vm != nil { + deleteVM(vm) + } + }) + + It("Should be accessible from within the VM", func() { + vm = makeVMWithMountAndSymlinkScript("", []framework.PVCSpec{ + { + Name: "block-pv", + Size: "10M", + Block: true, + CephRBDImage: "rbd-test-image1", + CephMonitorIP: monitorIP, + CephRBDPool: "libvirt-pool", + CephSecretName: "ceph-admin", + ContainerPath: "/dev/testpvc", + }, + }, nil) + ssh = waitSSH(vm) + expectToBeUsableForFilesystem(ssh, "/dev/testpvc") + }) + }) +}) diff --git a/tests/e2e/ceph_test.go b/tests/e2e/ceph_test.go index 4adf1c026..882d476d7 100644 --- a/tests/e2e/ceph_test.go +++ b/tests/e2e/ceph_test.go @@ -22,30 +22,18 @@ import ( . 
"github.com/onsi/gomega" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/Mirantis/virtlet/tests/e2e/framework" . "github.com/Mirantis/virtlet/tests/e2e/ginkgo-ext" ) -const cephContainerName = "ceph_cluster" - var _ = Describe("Ceph volumes tests", func() { var ( monitorIP string secret string ) - BeforeAll(func() { - monitorIP, secret = setupCeph() - }) - - AfterAll(func() { - container, err := controller.DockerContainer(cephContainerName) - Expect(err).NotTo(HaveOccurred()) - container.Delete() - }) + withCeph(&monitorIP, &secret, "") Context("RBD volumes", func() { var ( @@ -87,7 +75,7 @@ var _ = Describe("Ceph volumes tests", func() { scheduleWaitSSH(&vm, &ssh) It("Must be accessible from within OS", func() { - checkFilesystemAccess(ssh) + expectToBeUsableForFilesystem(ssh, "/dev/vdb") }) }) }) @@ -98,57 +86,22 @@ var _ = Describe("Ceph volumes tests", func() { ) BeforeAll(func() { - pv := &v1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "rbd-pv-virtlet", - }, - Spec: v1.PersistentVolumeSpec{ - Capacity: v1.ResourceList{ - v1.ResourceStorage: resource.MustParse("10M"), - }, - AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, - PersistentVolumeSource: v1.PersistentVolumeSource{ - FlexVolume: cephPersistentVolumeSource("rbd-test-image-pv", monitorIP, secret), - }, - }, - } - do(controller.PersistentVolumesClient().Create(pv)) - - pvc := &v1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "rbd-claim", - }, - Spec: v1.PersistentVolumeClaimSpec{ - AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, - Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceStorage: resource.MustParse("10M"), - }, - }, - }, - } - do(controller.PersistentVolumeClaimsClient().Create(pvc)) - vm = controller.VM("cirros-vm-rbd-pv") - podCustomization := func(pod *framework.PodInterface) { - pod.Pod.Spec.Volumes = append(pod.Pod.Spec.Volumes, v1.Volume{ - Name: "test", - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: "rbd-claim", - }, + opts := VMOptions{ + PVCs: []framework.PVCSpec{ + { + Name: "rbd-pv-virtlet", + Size: "10M", + FlexVolumeOptions: cephOptions("rbd-test-image-pv", monitorIP, secret), }, - }) - } - - Expect(vm.CreateAndWait(VMOptions{}.ApplyDefaults(), time.Minute*5, podCustomization)).To(Succeed()) + }, + }.ApplyDefaults() + Expect(vm.CreateAndWait(opts, time.Minute*5, nil)).To(Succeed()) _ = do(vm.Pod()).(*framework.PodInterface) }) AfterAll(func() { deleteVM(vm) - controller.PersistentVolumeClaimsClient().Delete("rbd-claim", nil) - controller.PersistentVolumesClient().Delete("rbd-pv-virtlet", nil) }) It("Must be attached to libvirt domain", func() { @@ -156,89 +109,18 @@ var _ = Describe("Ceph volumes tests", func() { Expect(regexp.MustCompile("(?m:rbd-test-image-pv$)").MatchString(out)).To(BeTrue()) }) - Context("Mounted volumes", func() { - var ssh framework.Executor - scheduleWaitSSH(&vm, &ssh) - - It("Must be accessible from within OS", func() { - checkFilesystemAccess(ssh) - }) + It("Must be accessible from within the VM", func() { + ssh := waitSSH(vm) + expectToBeUsableForFilesystem(ssh, "/dev/vdb") }) }) }) -func checkFilesystemAccess(ssh framework.Executor) { - do(framework.RunSimple(ssh, "sudo /usr/sbin/mkfs.ext2 /dev/vdb")) - do(framework.RunSimple(ssh, "sudo mount /dev/vdb /mnt")) - out := do(framework.RunSimple(ssh, "ls -l /mnt")).(string) - 
Expect(out).To(ContainSubstring("lost+found")) -} - -func setupCeph() (string, string) { - nodeExecutor, err := controller.DinDNodeExecutor("kube-master") - Expect(err).NotTo(HaveOccurred()) - - route, err := framework.RunSimple(nodeExecutor, "route", "-n") - Expect(err).NotTo(HaveOccurred()) - - match := regexp.MustCompile(`(?:default|0\.0\.0\.0)\s+([\d.]+)`).FindStringSubmatch(route) - Expect(match).To(HaveLen(2)) - - monIP := match[1] - cephPublicNetwork := monIP + "/16" - - container, err := controller.DockerContainer(cephContainerName) - Expect(err).NotTo(HaveOccurred()) - - container.Delete() - Expect(container.PullImage("docker.io/ceph/daemon:v3.1.0-stable-3.1-mimic-centos-7")).To(Succeed()) - Expect(container.Run("docker.io/ceph/daemon:v3.1.0-stable-3.1-mimic-centos-7", - map[string]string{ - "MON_IP": monIP, - "CEPH_PUBLIC_NETWORK": cephPublicNetwork, - "CEPH_DEMO_UID": "foo", - "CEPH_DEMO_ACCESS_KEY": "foo", - "CEPH_DEMO_SECRET_KEY": "foo", - "CEPH_DEMO_BUCKET": "foo", - "DEMO_DAEMONS": "osd mds", - }, - "host", nil, false, "demo")).To(Succeed()) - - cephContainerExecutor := container.Executor(false, "") - By("Waiting for ceph cluster") - Eventually(func() error { - _, err := framework.RunSimple(cephContainerExecutor, "ceph", "-s") - return err - }).Should(Succeed()) - By("Ceph cluster started") - - var out string - commands := []string{ - // Adjust ceph configs - `echo -e "rbd default features = 1\nrbd default format = 2" >> /etc/ceph/ceph.conf`, - - // Add rbd pool and volume - `ceph osd pool create libvirt-pool 8 8`, - `rbd create rbd-test-image1 --size 10M --pool libvirt-pool --image-feature layering`, - `rbd create rbd-test-image2 --size 10M --pool libvirt-pool --image-feature layering`, - `rbd create rbd-test-image-pv --size 10M --pool libvirt-pool --image-feature layering`, - - // Add user for virtlet - `ceph auth get-or-create client.libvirt`, - `ceph auth caps client.libvirt mon "allow *" osd "allow *"`, - `ceph auth get-key client.libvirt`, - } - for _, cmd := range commands { - out = do(framework.RunSimple(cephContainerExecutor, "/bin/bash", "-c", cmd)).(string) - } - return monIP, out -} - func cephOptions(volume, monitorIP, secret string) map[string]string { return map[string]string{ "type": "ceph", "monitor": monitorIP + ":6789", - "user": "libvirt", + "user": "admin", "secret": secret, "volume": volume, "pool": "libvirt-pool", @@ -252,9 +134,4 @@ func cephVolumeSource(volume, monitorIP, secret string) *v1.FlexVolumeSource { } } -func cephPersistentVolumeSource(volume, monitorIP, secret string) *v1.FlexPersistentVolumeSource { - return &v1.FlexPersistentVolumeSource{ - Driver: "virtlet/flexvolume_driver", - Options: cephOptions(volume, monitorIP, secret), - } -} +// TODO: use client.admin instead of client.libvirt diff --git a/tests/e2e/common.go b/tests/e2e/common.go index d3c4367c9..3747d52eb 100644 --- a/tests/e2e/common.go +++ b/tests/e2e/common.go @@ -27,11 +27,15 @@ import ( . "github.com/onsi/gomega" "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" "github.com/Mirantis/virtlet/tests/e2e/framework" . 
"github.com/Mirantis/virtlet/tests/e2e/ginkgo-ext" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +const cephContainerName = "ceph_cluster" + var ( vmImageLocation = flag.String("image", defaultVMImageLocation, "VM image URL (*without http(s)://*") sshUser = flag.String("sshuser", DefaultSSHUser, "default SSH user for VMs") @@ -39,6 +43,7 @@ var ( includeUnsafeTests = flag.Bool("include-unsafe-tests", false, "include tests that can be unsafe if they're run outside the build container") memoryLimit = flag.Int("memoryLimit", 160, "default VM memory limit (in MiB)") junitOutput = flag.String("junitOutput", "", "JUnit XML output file") + controller *framework.Controller ) // scheduleWaitSSH schedules SSH interface initialization before the test context starts @@ -174,3 +179,153 @@ func includeUnsafe() { Skip("Tests that are unsafe outside the build container are disabled") } } + +func withLoopbackBlockDevice(virtletNodeName, devPath *string) { + var nodeExecutor framework.Executor + BeforeAll(func() { + var err error + *virtletNodeName, err = controller.VirtletNodeName() + Expect(err).NotTo(HaveOccurred()) + nodeExecutor, err = controller.DinDNodeExecutor(*virtletNodeName) + Expect(err).NotTo(HaveOccurred()) + + _, err = framework.RunSimple(nodeExecutor, "dd", "if=/dev/zero", "of=/rawdevtest", "bs=1M", "count=10") + Expect(err).NotTo(HaveOccurred()) + // We use mkfs.ext3 here because mkfs.ext4 on + // the node may be too new for CirrOS, causing + // errors like this in VM's dmesg: + // [ 1.316395] EXT3-fs (vdb): error: couldn't mount because of unsupported optional features (2c0) + // [ 1.320222] EXT4-fs (vdb): couldn't mount RDWR because of unsupported optional features (400) + // [ 1.339594] EXT3-fs (vdc1): error: couldn't mount because of unsupported optional features (240) + // [ 1.342850] EXT4-fs (vdc1): mounted filesystem with ordered data mode. 
Opts: (null)
+		_, err = framework.RunSimple(nodeExecutor, "mkfs.ext3", "/rawdevtest")
+		Expect(err).NotTo(HaveOccurred())
+		*devPath, err = framework.RunSimple(nodeExecutor, "losetup", "-f", "/rawdevtest", "--show")
+		Expect(err).NotTo(HaveOccurred())
+	})
+
+	AfterAll(func() {
+		// The loopback device is detached by itself upon
+		// success (TODO: check why it happens), so we
+		// ignore errors here
+		framework.RunSimple(nodeExecutor, "losetup", "-d", *devPath)
+	})
+}
+
+func withCeph(monitorIP, secret *string, kubeSecret string) {
+	BeforeAll(func() {
+		nodeExecutor, err := controller.DinDNodeExecutor("kube-master")
+		Expect(err).NotTo(HaveOccurred())
+
+		route, err := framework.RunSimple(nodeExecutor, "route", "-n")
+		Expect(err).NotTo(HaveOccurred())
+
+		match := regexp.MustCompile(`(?:default|0\.0\.0\.0)\s+([\d.]+)`).FindStringSubmatch(route)
+		Expect(match).To(HaveLen(2))
+
+		*monitorIP = match[1]
+		cephPublicNetwork := *monitorIP + "/16"
+
+		container, err := controller.DockerContainer(cephContainerName)
+		Expect(err).NotTo(HaveOccurred())
+
+		container.Delete()
+		Expect(container.PullImage("docker.io/ceph/daemon:v3.1.0-stable-3.1-mimic-centos-7")).To(Succeed())
+		Expect(container.Run("docker.io/ceph/daemon:v3.1.0-stable-3.1-mimic-centos-7",
+			map[string]string{
+				"MON_IP":               *monitorIP,
+				"CEPH_PUBLIC_NETWORK":  cephPublicNetwork,
+				"CEPH_DEMO_UID":        "foo",
+				"CEPH_DEMO_ACCESS_KEY": "foo",
+				"CEPH_DEMO_SECRET_KEY": "foo",
+				"CEPH_DEMO_BUCKET":     "foo",
+				"DEMO_DAEMONS":         "osd mds",
+			},
+			"host", nil, false, "demo")).To(Succeed())
+
+		cephContainerExecutor := container.Executor(false, "")
+		By("Waiting for ceph cluster")
+		Eventually(func() error {
+			_, err := framework.RunSimple(cephContainerExecutor, "ceph", "-s")
+			return err
+		}).Should(Succeed())
+		By("Ceph cluster started")
+
+		commands := []string{
+			// Add rbd pool and volumes
+			`ceph osd pool create libvirt-pool 8 8`,
+			`rbd create rbd-test-image1 --size 10M --pool libvirt-pool --image-feature layering`,
+			`rbd create rbd-test-image2 --size 10M --pool libvirt-pool --image-feature layering`,
+			`rbd create rbd-test-image-pv --size 10M --pool libvirt-pool --image-feature layering`,
+
+			// Get the key of the admin user for use by virtlet
+			`ceph auth get-key client.admin`,
+		}
+		var out string
+		for _, cmd := range commands {
+			out = do(framework.RunSimple(cephContainerExecutor, "/bin/bash", "-c", cmd)).(string)
+		}
+		if secret != nil {
+			*secret = out
+		}
+		if kubeSecret != "" {
+			// The key from `ceph auth get-key` is stored as-is:
+			// the client base64-encodes Secret data by itself.
+			_, err = controller.Secrets().Create(&v1.Secret{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: kubeSecret,
+				},
+				Type: "kubernetes.io/rbd",
+				Data: map[string][]byte{
+					"key": []byte(out),
+				},
+			})
+			Expect(err).NotTo(HaveOccurred())
+		}
+	})
+
+	AfterAll(func() {
+		container, err := controller.DockerContainer(cephContainerName)
+		Expect(err).NotTo(HaveOccurred())
+		container.Delete()
+		if kubeSecret != "" {
+			Expect(controller.Secrets().Delete(kubeSecret, nil)).To(Succeed())
+			Eventually(func() error {
+				if _, err := controller.Secrets().Get(kubeSecret, metav1.GetOptions{}); err != nil {
+					if k8serrors.IsNotFound(err) {
+						return nil
+					}
+					return err
+				}
+				return fmt.Errorf("secret %s was not deleted", kubeSecret)
+			}).Should(Succeed())
+		}
+	})
+}
+
+func makeVMWithMountAndSymlinkScript(nodeName string, PVCs []framework.PVCSpec, podCustomization func(*framework.PodInterface)) *framework.VMInterface {
+	vm := controller.VM("mount-vm")
+	Expect(vm.CreateAndWait(VMOptions{
+		NodeName: nodeName,
+		// TODO: should also have an option to test using
+		// ubuntu image with volumes mounted using cloud-init
+		// userdata 'mounts' section
+		UserDataScript: "@virtlet-mount-script@",
+		PVCs:           PVCs,
+	}.ApplyDefaults(), time.Minute*5, podCustomization)).To(Succeed())
+	_, err := vm.Pod()
+	Expect(err).NotTo(HaveOccurred())
+	return vm
+}
+
+func expectToBeUsableForFilesystem(ssh framework.Executor, devPath string) {
+	Eventually(func() error {
+		_, err := framework.RunSimple(ssh, fmt.Sprintf("sudo /usr/sbin/mkfs.ext2 %s", devPath))
+		return err
+	}, 60*5, 3).Should(Succeed())
+	do(framework.RunSimple(ssh, fmt.Sprintf("sudo mount %s /mnt", devPath)))
+	out := do(framework.RunSimple(ssh, "ls -l /mnt")).(string)
+	Expect(out).To(ContainSubstring("lost+found"))
+}
diff --git a/tests/e2e/e2e_test.go b/tests/e2e/e2e_test.go
index 7f7101044..f101c63e0 100644
--- a/tests/e2e/e2e_test.go
+++ b/tests/e2e/e2e_test.go
@@ -29,8 +29,6 @@ import (
 	. "github.com/Mirantis/virtlet/tests/e2e/ginkgo-ext"
 )
 
-var controller *framework.Controller
-
 func TestE2E(t *testing.T) {
 	SetDefaultEventuallyTimeout(time.Minute * 5)
 	RegisterFailHandler(Fail)
diff --git a/tests/e2e/framework/pod_interface.go b/tests/e2e/framework/pod_interface.go
index 7a957505b..0ef43cb47 100644
--- a/tests/e2e/framework/pod_interface.go
+++ b/tests/e2e/framework/pod_interface.go
@@ -137,8 +137,8 @@ func (pi *PodInterface) Wait(timing ...time.Duration) error {
 	return pi.WaitForPodStatus(nil, timing...)
 }
 
-// WaitDestruction waits for the pod to be deleted
-func (pi *PodInterface) WaitDestruction(timing ...time.Duration) error {
+// WaitForDestruction waits for the pod to be deleted
+func (pi *PodInterface) WaitForDestruction(timing ...time.Duration) error {
 	timeout := time.Minute * 5
 	pollPeriond := time.Second
 	consistencyPeriod := time.Second * 5
@@ -153,8 +153,7 @@
 	}
 
 	return waitForConsistentState(func() error {
-		_, err := pi.controller.client.Pods(pi.Pod.Namespace).Get(pi.Pod.Name, metav1.GetOptions{})
-		if err != nil {
+		if _, err := pi.controller.client.Pods(pi.Pod.Namespace).Get(pi.Pod.Name, metav1.GetOptions{}); err != nil {
 			if k8serrors.IsNotFound(err) {
 				return nil
 			}
diff --git a/tests/e2e/framework/pvc_interface.go b/tests/e2e/framework/pvc_interface.go
new file mode 100644
index 000000000..7ace300c1
--- /dev/null
+++ b/tests/e2e/framework/pvc_interface.go
@@ -0,0 +1,264 @@
+/*
+Copyright 2017 Mirantis
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package framework + +import ( + "errors" + "fmt" + "strings" + "time" + + "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PVCSpec describes a PVC+PV pair to create. +type PVCSpec struct { + // The name of PVC. The PV name is derived by adding the "-pv" + // suffix. + Name string + // The size of PV. Must be parseable as k8s resource quantity + // (e.g. 10M). + Size string + // If non-empty, specifies the node name for local PV. + NodeName string + // If true, specifies the block volume mode, otherwise + // filesystem volume mode is used. + Block bool + // In block volume mode, the path to the block device inside the VM, + // empty value means not referencing the volume in volumeDevices. + // In filesystem volume mode, the path inside the VM for mounting the volume, + // empty value means not mounting the volume. + ContainerPath string + // For local PVs, specifies the path to the directory when + // using filesystem mode, and the path to the block device in + // the block mode. + LocalPath string + // Ceph RBD image name. + CephRBDImage string + // Ceph monitor IP. + CephMonitorIP string + // Ceph pool name for the RBD. + CephRBDPool string + // The name of Kubernetes secret to use for Ceph. + CephSecretName string + // FlexVolume options for Virtlet flexvolume driver. + FlexVolumeOptions map[string]string +} + +func (spec PVCSpec) pvSource(namespace string) v1.PersistentVolumeSource { + switch { + case spec.LocalPath != "": + if spec.CephRBDImage != "" || len(spec.FlexVolumeOptions) > 0 { + panic("Can only use one of LocalPath, CephRBDImage and FlexVolumeOptions at the same time") + } + return v1.PersistentVolumeSource{ + Local: &v1.LocalVolumeSource{ + Path: spec.LocalPath, + }, + } + case spec.CephRBDImage != "" && len(spec.FlexVolumeOptions) > 0: + panic("Can only use one of LocalPath, CephRBDImage and FlexVolumeOptions at the same time") + case spec.CephRBDImage != "": + return v1.PersistentVolumeSource{ + RBD: &v1.RBDPersistentVolumeSource{ + CephMonitors: []string{spec.CephMonitorIP}, + RBDImage: spec.CephRBDImage, + RBDPool: spec.CephRBDPool, + SecretRef: &v1.SecretReference{ + Name: spec.CephSecretName, + Namespace: namespace, + }, + }, + } + case len(spec.FlexVolumeOptions) > 0: + return v1.PersistentVolumeSource{ + FlexVolume: &v1.FlexPersistentVolumeSource{ + Driver: "virtlet/flexvolume_driver", + Options: spec.FlexVolumeOptions, + }, + } + default: + panic("bad PV/PVC spec") + } +} + +func (spec PVCSpec) nodeAffinity() *v1.VolumeNodeAffinity { + if spec.NodeName == "" { + return nil + } + return &v1.VolumeNodeAffinity{ + Required: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "kubernetes.io/hostname", + Operator: "In", + Values: []string{spec.NodeName}, + }, + }, + }, + }, + }, + } +} + +// PVCInterface is used to work with PersistentVolumes (PVs) and PersistentVolumeClaims (PVCs). +type PVCInterface struct { + controller *Controller + // Spec for the PV and PVC. 
+ Spec PVCSpec + // Kubernetes PV object + Volume *v1.PersistentVolume + // Kubernetes PVC object + Claim *v1.PersistentVolumeClaim +} + +func newPersistentVolumeClaim(controller *Controller, spec PVCSpec) *PVCInterface { + volMode := v1.PersistentVolumeFilesystem + if spec.Block { + volMode = v1.PersistentVolumeBlock + } + return &PVCInterface{ + controller: controller, + Spec: spec, + Volume: &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: spec.Name + "-pv", + }, + Spec: v1.PersistentVolumeSpec{ + Capacity: v1.ResourceList{ + v1.ResourceStorage: resource.MustParse(spec.Size), + }, + VolumeMode: &volMode, + AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, + ClaimRef: &v1.ObjectReference{ + Name: spec.Name, + Namespace: controller.Namespace(), + }, + PersistentVolumeSource: spec.pvSource(controller.Namespace()), + NodeAffinity: spec.nodeAffinity(), + }, + }, + Claim: &v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: spec.Name, + }, + Spec: v1.PersistentVolumeClaimSpec{ + AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, + VolumeMode: &volMode, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceStorage: resource.MustParse(spec.Size), + }, + }, + }, + }, + } +} + +// AddToPod adds the volume to the pod referencing the PVC. +func (pvci *PVCInterface) AddToPod(pi *PodInterface, name string) { + pi.Pod.Spec.Volumes = append(pi.Pod.Spec.Volumes, v1.Volume{ + Name: name, + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvci.Claim.Name, + }, + }, + }) + if pvci.Spec.ContainerPath != "" { + c := &pi.Pod.Spec.Containers[0] + if pvci.Spec.Block { + c.VolumeDevices = append(c.VolumeDevices, v1.VolumeDevice{ + Name: name, + DevicePath: pvci.Spec.ContainerPath, + }) + } else { + c.VolumeMounts = append(c.VolumeMounts, v1.VolumeMount{ + Name: name, + MountPath: pvci.Spec.ContainerPath, + }) + } + } +} + +// Create creates the PVC and its corresponding PV. +func (pvci *PVCInterface) Create() error { + updatedPV, err := pvci.controller.PersistentVolumesClient().Create(pvci.Volume) + if err != nil { + return err + } + pvci.Volume = updatedPV + updatedPVC, err := pvci.controller.PersistentVolumeClaimsClient().Create(pvci.Claim) + if err != nil { + return err + } + pvci.Claim = updatedPVC + return nil +} + +// Delete deletes the PVC and its corresponding PV. +// It doesn't return an error if either PVC or PV doesn't exist. +func (pvci *PVCInterface) Delete() error { + var errs []string + if err := pvci.controller.PersistentVolumeClaimsClient().Delete(pvci.Claim.Name, nil); err != nil && !k8serrors.IsNotFound(err) { + errs = append(errs, fmt.Sprintf("error deleting pvc %q: %v", pvci.Claim.Name, err)) + } + if err := pvci.controller.PersistentVolumesClient().Delete(pvci.Volume.Name, nil); err != nil && !k8serrors.IsNotFound(err) { + errs = append(errs, fmt.Sprintf("error deleting pv %q: %v", pvci.Volume.Name, err)) + } + if len(errs) == 0 { + return nil + } + return errors.New(strings.Join(errs, "\n")) +} + +// WaitForDestruction waits for the PV and PVC to be deleted. 
+func (pvci *PVCInterface) WaitForDestruction(timing ...time.Duration) error { + timeout := time.Minute * 5 + pollPeriond := time.Second + consistencyPeriod := time.Second * 5 + if len(timing) > 0 { + timeout = timing[0] + } + if len(timing) > 1 { + pollPeriond = timing[1] + } + if len(timing) > 2 { + consistencyPeriod = timing[2] + } + + return waitForConsistentState(func() error { + switch _, err := pvci.controller.PersistentVolumeClaimsClient().Get(pvci.Claim.Name, metav1.GetOptions{}); { + case err == nil: + return errors.New("PVC not deleted") + case !k8serrors.IsNotFound(err): + return err + } + switch _, err := pvci.controller.PersistentVolumesClient().Get(pvci.Volume.Name, metav1.GetOptions{}); { + case err == nil: + return errors.New("PV not deleted") + case !k8serrors.IsNotFound(err): + return err + } + return nil + }, timeout, pollPeriond, consistencyPeriod) +} diff --git a/tests/e2e/framework/vm_interface.go b/tests/e2e/framework/vm_interface.go index 91435e509..b5544671b 100644 --- a/tests/e2e/framework/vm_interface.go +++ b/tests/e2e/framework/vm_interface.go @@ -35,6 +35,7 @@ type VMInterface struct { pod *PodInterface Name string + PVCs []*PVCInterface } // VMOptions defines VM parameters @@ -67,6 +68,8 @@ type VMOptions struct { RootVolumeSize string // "cni" annotation value for CNI-Genie MultiCNI string + // PVCs (with corresponding PVs) to use + PVCs []PVCSpec } func newVMInterface(controller *Controller, name string) *VMInterface { @@ -106,6 +109,14 @@ func (vmi *VMInterface) PodWithoutChecks() *PodInterface { // Create creates a new VM pod func (vmi *VMInterface) Create(options VMOptions, beforeCreate func(*PodInterface)) error { pod := newPodInterface(vmi.controller, vmi.buildVMPod(options)) + for _, pvcSpec := range options.PVCs { + pvc := newPersistentVolumeClaim(vmi.controller, pvcSpec) + if err := pvc.Create(); err != nil { + return err + } + pvc.AddToPod(pod, pvcSpec.Name) + vmi.PVCs = append(vmi.PVCs, pvc) + } if beforeCreate != nil { beforeCreate(pod) } @@ -130,8 +141,21 @@ func (vmi *VMInterface) Delete(waitTimeout time.Duration) error { if vmi.pod == nil { return nil } - vmi.pod.Delete() - return vmi.pod.WaitDestruction(waitTimeout) + if err := vmi.pod.Delete(); err != nil { + return err + } + if err := vmi.pod.WaitForDestruction(waitTimeout); err != nil { + return err + } + for _, pvc := range vmi.PVCs { + if err := pvc.Delete(); err != nil { + return err + } + if err := pvc.WaitForDestruction(); err != nil { + return err + } + } + return nil } // VirtletPod returns pod in which virtlet instance, responsible for this VM is located diff --git a/tests/e2e/volume_mount_test.go b/tests/e2e/volume_mount_test.go index aa18e7414..614531bcc 100644 --- a/tests/e2e/volume_mount_test.go +++ b/tests/e2e/volume_mount_test.go @@ -30,34 +30,13 @@ import ( var _ = Describe("Container volume mounts", func() { Context("Of raw volumes", func() { var ( - err error virtletNodeName string vm *framework.VMInterface - nodeExecutor framework.Executor devPath string ssh framework.Executor ) - BeforeAll(func() { - virtletNodeName, err = controller.VirtletNodeName() - Expect(err).NotTo(HaveOccurred()) - nodeExecutor, err = controller.DinDNodeExecutor(virtletNodeName) - Expect(err).NotTo(HaveOccurred()) - - _, err = framework.RunSimple(nodeExecutor, "dd", "if=/dev/zero", "of=/rawdevtest", "bs=1M", "count=10") - Expect(err).NotTo(HaveOccurred()) - // We use mkfs.ext3 here because mkfs.ext4 on - // the node may be too new for CirrOS, causing - // errors like this in VM's dmesg: - // [ 
1.316395] EXT3-fs (vdb): error: couldn't mount because of unsupported optional features (2c0) - // [ 1.320222] EXT4-fs (vdb): couldn't mount RDWR because of unsupported optional features (400) - // [ 1.339594] EXT3-fs (vdc1): error: couldn't mount because of unsupported optional features (240) - // [ 1.342850] EXT4-fs (vdc1): mounted filesystem with ordered data mode. Opts: (null) - _, err = framework.RunSimple(nodeExecutor, "mkfs.ext3", "/rawdevtest") - Expect(err).NotTo(HaveOccurred()) - devPath, err = framework.RunSimple(nodeExecutor, "losetup", "-f", "/rawdevtest", "--show") - Expect(err).NotTo(HaveOccurred()) - }) + withLoopbackBlockDevice(&virtletNodeName, &devPath) AfterEach(func() { if ssh != nil { @@ -68,15 +47,8 @@ var _ = Describe("Container volume mounts", func() { } }) - AfterAll(func() { - // The loopback device is detached by itself upon - // success (TODO: check why it happens), so we - // ignore errors here - framework.RunSimple(nodeExecutor, "losetup", "-d", devPath) - }) - It("Should be handled inside the VM", func() { - vm = makeVolumeMountVM(virtletNodeName, func(pod *framework.PodInterface) { + vm = makeVMWithMountAndSymlinkScript(virtletNodeName, nil, func(pod *framework.PodInterface) { addFlexvolMount(pod, "blockdev", "/foo", map[string]string{ "type": "raw", "path": devPath, @@ -88,7 +60,7 @@ var _ = Describe("Container volume mounts", func() { }) It("Should be handled inside the VM together with another volume mount", func() { - vm = makeVolumeMountVM(virtletNodeName, func(pod *framework.PodInterface) { + vm = makeVMWithMountAndSymlinkScript(virtletNodeName, nil, func(pod *framework.PodInterface) { addFlexvolMount(pod, "blockdev1", "/foo", map[string]string{ "type": "raw", "path": devPath, @@ -112,7 +84,7 @@ var _ = Describe("Container volume mounts", func() { ) BeforeAll(func() { - vm = makeVolumeMountVM("", func(pod *framework.PodInterface) { + vm = makeVMWithMountAndSymlinkScript("", nil, func(pod *framework.PodInterface) { addFlexvolMount(pod, "blockdev", "/foo", map[string]string{ "type": "qcow2", "capacity": "10MB", @@ -268,20 +240,6 @@ func addFlexvolMount(pod *framework.PodInterface, name string, mountPath string, }) } -func makeVolumeMountVM(nodeName string, podCustomization func(*framework.PodInterface)) *framework.VMInterface { - vm := controller.VM("mount-vm") - Expect(vm.CreateAndWait(VMOptions{ - NodeName: nodeName, - // TODO: should also have an option to test using - // ubuntu image with volumes mounted using cloud-init - // userdata 'mounts' section - UserDataScript: "@virtlet-mount-script@", - }.ApplyDefaults(), time.Minute*5, podCustomization)).To(Succeed()) - _, err := vm.Pod() - Expect(err).NotTo(HaveOccurred()) - return vm -} - func shouldBeMounted(ssh framework.Executor, path string) { Eventually(func() (string, error) { return framework.RunSimple(ssh, "ls -l "+path) From bc746b02a21b8d58aac53780d20bec3453042b00 Mon Sep 17 00:00:00 2001 From: Ivan Shvedunov Date: Wed, 19 Sep 2018 12:19:16 +0300 Subject: [PATCH 2/3] Update ceph daemon start command in pv doc --- docs/design-proposals/pv.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/docs/design-proposals/pv.md b/docs/design-proposals/pv.md index d5e2b4bd9..691162d45 100644 --- a/docs/design-proposals/pv.md +++ b/docs/design-proposals/pv.md @@ -412,7 +412,12 @@ MON_IP=$(docker exec kube-master route | grep default | awk '{print $2}') CEPH_PUBLIC_NETWORK=${MON_IP}/16 docker run -d --net=host -e MON_IP=${MON_IP} \ -e CEPH_PUBLIC_NETWORK=${CEPH_PUBLIC_NETWORK} \ - 
--name ceph_cluster docker.io/ceph/demo + -e CEPH_DEMO_UID=foo \ + -e CEPH_DEMO_ACCESS_KEY=foo \ + -e CEPH_DEMO_SECRET_KEY=foo \ + -e CEPH_DEMO_BUCKET=foo \ + -e DEMO_DAEMONS="osd mds" \ + --name ceph_cluster docker.io/ceph/daemon demo ``` Create a pool there: From 2ee95508f54bee5e94d0a87410e59a3ed92c92ad Mon Sep 17 00:00:00 2001 From: Ivan Shvedunov Date: Wed, 19 Sep 2018 12:20:12 +0300 Subject: [PATCH 3/3] Don't run Ceph RBD test on CircleCI for now --- tests/e2e/block_pv_test.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/e2e/block_pv_test.go b/tests/e2e/block_pv_test.go index 22a3f1abc..24030df30 100644 --- a/tests/e2e/block_pv_test.go +++ b/tests/e2e/block_pv_test.go @@ -73,7 +73,9 @@ var _ = Describe("Block PVs", func() { } }) - It("Should be accessible from within the VM", func() { + // FIXME: the test is marked Disruptive because rbd + // hangs on CircleCI for some reason. + It("[Disruptive] Should be accessible from within the VM", func() { vm = makeVMWithMountAndSymlinkScript("", []framework.PVCSpec{ { Name: "block-pv",
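The same `PVCSpec` plumbing also admits a filesystem-mode variant that this series doesn't exercise. A minimal sketch, assuming a filesystem-mode local PV reaches the VM the same way the flexvolume mounts in volume_mount_test.go do; the test body, the `/var/lib/virtlet-fs-pv` directory, and the `/mnt/testpvc` mount path are illustrative only, not part of the patches:

```go
// Hypothetical filesystem-mode sibling of the "[Local]" block PV case in
// block_pv_test.go. Leaving Block unset makes newPersistentVolumeClaim
// emit volumeMode: Filesystem, and AddToPod then references the claim via
// volumeMounts instead of volumeDevices.
It("Should be mountable from within the VM", func() {
	vm = makeVMWithMountAndSymlinkScript(virtletNodeName, []framework.PVCSpec{
		{
			Name:          "fs-pv",                  // the PV itself gets the name "fs-pv-pv"
			Size:          "10M",
			NodeName:      virtletNodeName,          // node affinity for the local PV
			LocalPath:     "/var/lib/virtlet-fs-pv", // assumed directory on the node
			ContainerPath: "/mnt/testpvc",           // mounted rather than attached as a device
		},
	}, nil)
	ssh = waitSSH(vm)
	shouldBeMounted(ssh, "/mnt/testpvc")
})
```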
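Both `PodInterface.WaitForDestruction` and `PVCInterface.WaitForDestruction` follow the same variadic timing convention: timeout (default 5m), then poll period (default 1s), then the period during which the deleted state must hold consistently (default 5s). A usage sketch with assumed values; `pvc` stands for any `*framework.PVCInterface`:

```go
// Tighten the teardown check for a single PVC+PV pair: give up after two
// minutes, poll every two seconds, and require the "not found" state to
// hold for ten consecutive seconds before the pair counts as deleted.
err := pvc.WaitForDestruction(2*time.Minute, 2*time.Second, 10*time.Second)
Expect(err).NotTo(HaveOccurred())
```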