diff --git a/pkg/controller/volume/selinuxwarning/cache/volumecache.go b/pkg/controller/volume/selinuxwarning/cache/volumecache.go
index dfa129dae9064..4b19c985c866e 100644
--- a/pkg/controller/volume/selinuxwarning/cache/volumecache.go
+++ b/pkg/controller/volume/selinuxwarning/cache/volumecache.go
@@ -114,11 +114,19 @@ func (c *volumeCache) AddVolume(logger klog.Logger, volumeName v1.UniqueVolumeNa
 	}
 
 	// The volume is already known
-	// Add the pod to the cache or update its properties
-	volume.pods[podKey] = podInfo{
+	podInfo := podInfo{
 		seLinuxLabel: label,
 		changePolicy: changePolicy,
 	}
+	oldPodInfo, found := volume.pods[podKey]
+	if found && oldPodInfo == podInfo {
+		// The Pod is already known too and nothing changed since the last update.
+		// All conflicts were already reported when the Pod was added / updated in the cache last time.
+		return conflicts
+	}
+
+	// Add the updated pod info to the cache
+	volume.pods[podKey] = podInfo
 
 	// Emit conflicts for the pod
 	for otherPodKey, otherPodInfo := range volume.pods {
diff --git a/pkg/controller/volume/selinuxwarning/selinux_warning_controller.go b/pkg/controller/volume/selinuxwarning/selinux_warning_controller.go
index e5049e05abc39..d373eeabf5fd5 100644
--- a/pkg/controller/volume/selinuxwarning/selinux_warning_controller.go
+++ b/pkg/controller/volume/selinuxwarning/selinux_warning_controller.go
@@ -140,9 +140,9 @@ func NewController(
 	logger := klog.FromContext(ctx)
 	_, err = podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-		AddFunc:    func(obj interface{}) { c.addPod(logger, obj) },
-		DeleteFunc: func(obj interface{}) { c.deletePod(logger, obj) },
-		// Not watching updates: Pod volumes and SecurityContext are immutable after creation
+		AddFunc:    func(obj interface{}) { c.enqueuePod(logger, obj) },
+		UpdateFunc: func(oldObj, newObj interface{}) { c.updatePod(logger, oldObj, newObj) },
+		DeleteFunc: func(obj interface{}) { c.enqueuePod(logger, obj) },
 	})
 	if err != nil {
 		return nil, err
 	}
@@ -178,7 +178,7 @@ func NewController(
 	return c, nil
 }
 
-func (c *Controller) addPod(_ klog.Logger, obj interface{}) {
+func (c *Controller) enqueuePod(_ klog.Logger, obj interface{}) {
 	podRef, err := cache.DeletionHandlingObjectToName(obj)
 	if err != nil {
 		utilruntime.HandleError(fmt.Errorf("couldn't get key for pod %#v: %w", obj, err))
@@ -186,12 +186,29 @@ func (c *Controller) addPod(_ klog.Logger, obj interface{}) {
 	c.queue.Add(podRef)
 }
 
-func (c *Controller) deletePod(_ klog.Logger, obj interface{}) {
-	podRef, err := cache.DeletionHandlingObjectToName(obj)
-	if err != nil {
-		utilruntime.HandleError(fmt.Errorf("couldn't get key for pod %#v: %w", obj, err))
+func (c *Controller) updatePod(logger klog.Logger, oldObj, newObj interface{}) {
+	// Pod.Spec fields that are relevant to this controller are immutable after creation (i.e.
+	// pod volumes, SELinux labels, privileged flag). React to update only when the Pod
+	// reaches its final state - kubelet will unmount the Pod volumes and the controller should
+	// therefore remove them from the cache.
+	oldPod, ok := oldObj.(*v1.Pod)
+	if !ok {
+		return
 	}
-	c.queue.Add(podRef)
+	newPod, ok := newObj.(*v1.Pod)
+	if !ok {
+		return
+	}
+
+	// This is an optimization. In theory, passing most pod updates to the controller queue should lead to a no-op.
+	// To save some CPU, pass only pod updates that can cause any action in the controller.
+	if oldPod.Status.Phase == newPod.Status.Phase {
+		return
+	}
+	if newPod.Status.Phase != v1.PodFailed && newPod.Status.Phase != v1.PodSucceeded {
+		return
+	}
+	c.enqueuePod(logger, newObj)
 }
 
 func (c *Controller) addPVC(logger klog.Logger, obj interface{}) {
@@ -277,11 +294,7 @@ func (c *Controller) enqueueAllPodsForPVC(logger klog.Logger, namespace, name st
 		return
 	}
 	for _, obj := range objs {
-		podRef, err := cache.DeletionHandlingObjectToName(obj)
-		if err != nil {
-			utilruntime.HandleError(fmt.Errorf("couldn't get key for pod %#v: %w", obj, err))
-		}
-		c.queue.Add(podRef)
+		c.enqueuePod(logger, obj)
 	}
 }
 
@@ -401,6 +414,11 @@ func (c *Controller) sync(ctx context.Context, podRef cache.ObjectName) error {
 		logger.V(5).Info("Error getting pod from informer", "pod", klog.KObj(pod), "podUID", pod.UID, "err", err)
 		return err
 	}
+	if pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded {
+		// The pod has reached its final state and kubelet is unmounting its volumes.
+		// Remove them from the cache.
+		return c.syncPodDelete(ctx, podRef)
+	}
 	return c.syncPod(ctx, pod)
 }
 
@@ -481,8 +499,15 @@ func (c *Controller) syncVolume(logger klog.Logger, pod *v1.Pod, spec *volume.Sp
 	changePolicy := v1.SELinuxChangePolicyMountOption
 	if pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.SELinuxChangePolicy != nil {
 		changePolicy = *pod.Spec.SecurityContext.SELinuxChangePolicy
+		logger.V(5).Info("Using Pod SELinux change policy", "pod", klog.KObj(pod), "changePolicy", changePolicy)
 	}
-	if !pluginSupportsSELinuxContextMount {
+	if !pluginSupportsSELinuxContextMount && changePolicy != v1.SELinuxChangePolicyRecursive {
+		logger.V(5).Info("Volume does not support SELinux context mount, setting changePolicy to Recursive", "pod", klog.KObj(pod), "volume", spec.Name())
+		changePolicy = v1.SELinuxChangePolicyRecursive
+	}
+
+	if seLinuxLabel == "" && changePolicy != v1.SELinuxChangePolicyRecursive {
+		logger.V(5).Info("Pod has empty SELinux label, setting changePolicy to Recursive", "pod", klog.KObj(pod))
 		changePolicy = v1.SELinuxChangePolicyRecursive
 	}
 
diff --git a/pkg/controller/volume/selinuxwarning/selinux_warning_controller_test.go b/pkg/controller/volume/selinuxwarning/selinux_warning_controller_test.go
index 3f863cac8abb4..503f1cbcbc68d 100644
--- a/pkg/controller/volume/selinuxwarning/selinux_warning_controller_test.go
+++ b/pkg/controller/volume/selinuxwarning/selinux_warning_controller_test.go
@@ -64,7 +64,7 @@ func TestSELinuxWarningController_Sync(t *testing.T) {
 		{
 			name: "existing pod with no volumes",
 			existingPods: []*v1.Pod{
-				pod("pod1", "s0:c1,c2", nil),
+				pod("pod1", "s0:c1,c2", nil).build(),
 			},
 			pod:            cache.ObjectName{Namespace: namespace, Name: "pod1"},
 			expectedEvents: nil,
@@ -73,7 +73,7 @@ func TestSELinuxWarningController_Sync(t *testing.T) {
 		{
 			name: "existing pod with unbound PVC",
 			existingPods: []*v1.Pod{
-				podWithPVC("pod1", "s0:c1,c2", nil, "non-existing-pvc", "vol1"),
+				pod("pod1", "s0:c1,c2", nil).withPVC("non-existing-pvc", "vol1").build(),
 			},
 			pod:         cache.ObjectName{Namespace: namespace, Name: "pod1"},
 			expectError: true, // PVC is missing, add back to queue with exp. backoff
@@ -89,7 +89,7 @@ func TestSELinuxWarningController_Sync(t *testing.T) {
 				pvBoundToPVC("pv1", "pvc1"),
 			},
 			existingPods: []*v1.Pod{
-				podWithPVC("pod1", "s0:c1,c2", nil, "pvc1", "vol1"),
+				pod("pod1", "s0:c1,c2", nil).withPVC("pvc1", "vol1").build(),
 			},
 			pod:            cache.ObjectName{Namespace: namespace, Name: "pod1"},
 			expectedEvents: nil,
@@ -112,7 +112,7 @@ func TestSELinuxWarningController_Sync(t *testing.T) {
 				pvBoundToPVC("pv1", "pvc1"),
 			},
 			existingPods: []*v1.Pod{
-				podWithPVC("pod1", "s0:c1,c2", ptr.To(v1.SELinuxChangePolicyRecursive), "pvc1", "vol1"),
+				pod("pod1", "s0:c1,c2", ptr.To(v1.SELinuxChangePolicyRecursive)).withPVC("pvc1", "vol1").build(),
 			},
 			pod:            cache.ObjectName{Namespace: namespace, Name: "pod1"},
 			expectedEvents: nil,
@@ -135,7 +135,7 @@ func TestSELinuxWarningController_Sync(t *testing.T) {
 				pvBoundToPVC("pv1", "pvc1"),
 			},
 			existingPods: []*v1.Pod{
-				addInlineVolume(pod("pod1", "s0:c1,c2", nil)),
+				pod("pod1", "s0:c1,c2", nil).withInlineVolume().build(),
 			},
 			pod:            cache.ObjectName{Namespace: namespace, Name: "pod1"},
 			expectedEvents: nil,
@@ -158,7 +158,7 @@ func TestSELinuxWarningController_Sync(t *testing.T) {
 				pvBoundToPVC("pv1", "pvc1"),
 			},
 			existingPods: []*v1.Pod{
-				addInlineVolume(podWithPVC("pod1", "s0:c1,c2", nil, "pvc1", "vol1")),
+				pod("pod1", "s0:c1,c2", nil).withPVC("pvc1", "vol1").withInlineVolume().build(),
 			},
 			pod:            cache.ObjectName{Namespace: namespace, Name: "pod1"},
 			expectedEvents: nil,
@@ -188,8 +188,8 @@ func TestSELinuxWarningController_Sync(t *testing.T) {
 				pvBoundToPVC("pv1", "pvc1"),
 			},
 			existingPods: []*v1.Pod{
-				podWithPVC("pod1", "s0:c1,c2", nil, "pvc1", "vol1"),
-				pod("pod2", "s0:c98,c99", nil),
+				pod("pod1", "s0:c1,c2", nil).withPVC("pvc1", "vol1").build(),
+				pod("pod2", "s0:c98,c99", nil).build(),
 			},
 			pod: cache.ObjectName{Namespace: namespace, Name: "pod1"},
 			conflicts: []volumecache.Conflict{
@@ -233,8 +233,8 @@ func TestSELinuxWarningController_Sync(t *testing.T) {
 				pvBoundToPVC("pv1", "pvc1"),
 			},
 			existingPods: []*v1.Pod{
-				podWithPVC("pod1", "s0:c1,c2", ptr.To(v1.SELinuxChangePolicyRecursive), "pvc1", "vol1"),
-				pod("pod2", "s0:c98,c99", ptr.To(v1.SELinuxChangePolicyRecursive)),
+				pod("pod1", "s0:c1,c2", ptr.To(v1.SELinuxChangePolicyRecursive)).withPVC("pvc1", "vol1").build(),
+				pod("pod2", "s0:c98,c99", ptr.To(v1.SELinuxChangePolicyRecursive)).build(),
 			},
 			pod:       cache.ObjectName{Namespace: namespace, Name: "pod1"},
 			conflicts: []volumecache.Conflict{},
@@ -257,8 +257,8 @@ func TestSELinuxWarningController_Sync(t *testing.T) {
 				pvBoundToPVC("pv1", "pvc1"),
 			},
 			existingPods: []*v1.Pod{
-				podWithPVC("pod1", "s0:c1,c2", ptr.To(v1.SELinuxChangePolicyRecursive), "pvc1", "vol1"),
-				podWithPVC("pod2", "s0:c98,c99", ptr.To(v1.SELinuxChangePolicyMountOption), "pvc1", "vol1"),
+				pod("pod1", "s0:c1,c2", ptr.To(v1.SELinuxChangePolicyRecursive)).withPVC("pvc1", "vol1").build(),
+				pod("pod2", "s0:c98,c99", ptr.To(v1.SELinuxChangePolicyMountOption)).withPVC("pvc1", "vol1").build(),
 			},
 			pod: cache.ObjectName{Namespace: namespace, Name: "pod1"},
 			conflicts: []volumecache.Conflict{
@@ -302,7 +302,7 @@ func TestSELinuxWarningController_Sync(t *testing.T) {
 				pvBoundToPVC("pv1", "pvc1"),
 			},
 			existingPods: []*v1.Pod{
-				podWithPVC("pod1", "s0:c1,c2", nil, "pvc1", "vol1"),
+				pod("pod1", "s0:c1,c2", nil).withPVC("pvc1", "vol1").build(),
 				// "pod2" does not exist
 			},
 			pod: cache.ObjectName{Namespace: namespace, Name: "pod1"},
@@ -338,6 +338,107 @@ func TestSELinuxWarningController_Sync(t *testing.T) {
 				`Normal SELinuxLabelConflict SELinuxLabel ":::s0:c1,c2" conflicts with pod pod2 that uses the same volume as this pod with SELinuxLabel ":::s0:c98,c99". If both pods land on the same node, only one of them may access the volume.`,
 			},
 		},
+		{
+			name: "empty label implies Recursive policy",
+			existingPVCs: []*v1.PersistentVolumeClaim{
+				pvcBoundToPV("pv1", "pvc1"),
+			},
+			existingPVs: []*v1.PersistentVolume{
+				pvBoundToPVC("pv1", "pvc1"),
+			},
+			existingPods: []*v1.Pod{
+				pod("pod1", "", ptr.To(v1.SELinuxChangePolicyMountOption)).withPVC("pvc1", "vol1").build(),
+			},
+			pod:       cache.ObjectName{Namespace: namespace, Name: "pod1"},
+			conflicts: []volumecache.Conflict{},
+			expectedAddedVolumes: []addedVolume{
+				{
+					volumeName:   "fake-plugin/pv1",
+					podKey:       cache.ObjectName{Namespace: namespace, Name: "pod1"},
+					label:        "",
+					changePolicy: v1.SELinuxChangePolicyRecursive, // Reset to Recursive when the label is empty
+					csiDriver:    "ebs.csi.aws.com",
+				},
+			},
+		},
+		{
+			name: "pending pod is processed",
+			existingPVCs: []*v1.PersistentVolumeClaim{
+				pvcBoundToPV("pv1", "pvc1"),
+			},
+			existingPVs: []*v1.PersistentVolume{
+				pvBoundToPVC("pv1", "pvc1"),
+			},
+			existingPods: []*v1.Pod{
+				pod("pod1", "s0:c1,c2", nil).withPVC("pvc1", "vol1").withPhase(v1.PodPending).build(),
+			},
+			pod:            cache.ObjectName{Namespace: namespace, Name: "pod1"},
+			expectedEvents: nil,
+			expectedAddedVolumes: []addedVolume{
+				{
+					volumeName:   "fake-plugin/pv1",
+					podKey:       cache.ObjectName{Namespace: namespace, Name: "pod1"},
+					label:        ":::s0:c1,c2",
+					changePolicy: v1.SELinuxChangePolicyMountOption,
+					csiDriver:    "ebs.csi.aws.com",
+				},
+			},
+		},
+		{
+			name: "unknown pod is processed",
+			existingPVCs: []*v1.PersistentVolumeClaim{
+				pvcBoundToPV("pv1", "pvc1"),
+			},
+			existingPVs: []*v1.PersistentVolume{
+				pvBoundToPVC("pv1", "pvc1"),
+			},
+			existingPods: []*v1.Pod{
+				pod("pod1", "s0:c1,c2", nil).withPVC("pvc1", "vol1").withPhase(v1.PodUnknown).build(),
+			},
+			pod:            cache.ObjectName{Namespace: namespace, Name: "pod1"},
+			expectedEvents: nil,
+			expectedAddedVolumes: []addedVolume{
+				{
+					volumeName:   "fake-plugin/pv1",
+					podKey:       cache.ObjectName{Namespace: namespace, Name: "pod1"},
+					label:        ":::s0:c1,c2",
+					changePolicy: v1.SELinuxChangePolicyMountOption,
+					csiDriver:    "ebs.csi.aws.com",
+				},
+			},
+		},
+		{
+			name: "succeeded pod is removed from the cache",
+			existingPVCs: []*v1.PersistentVolumeClaim{
+				pvcBoundToPV("pv1", "pvc1"),
+			},
+			existingPVs: []*v1.PersistentVolume{
+				pvBoundToPVC("pv1", "pvc1"),
+			},
+			existingPods: []*v1.Pod{
+				pod("pod1", "s0:c1,c2", nil).withPVC("pvc1", "vol1").withPhase(v1.PodSucceeded).build(),
+			},
+			pod:                  cache.ObjectName{Namespace: namespace, Name: "pod1"},
+			expectedEvents:       nil,
+			expectedAddedVolumes: nil,
+			expectedDeletedPods:  []cache.ObjectName{{Namespace: namespace, Name: "pod1"}},
+		},
+		{
+			name: "failed pod is removed from the cache",
+			existingPVCs: []*v1.PersistentVolumeClaim{
+				pvcBoundToPV("pv1", "pvc1"),
+			},
+			existingPVs: []*v1.PersistentVolume{
+				pvBoundToPVC("pv1", "pvc1"),
+			},
+			existingPods: []*v1.Pod{
+				pod("pod1", "s0:c1,c2", nil).withPVC("pvc1", "vol1").withPhase(v1.PodFailed).build(),
+			},
+			pod:                  cache.ObjectName{Namespace: namespace, Name: "pod1"},
+			expectedEvents:       nil,
+			expectedAddedVolumes: nil,
+			expectedDeletedPods:  []cache.ObjectName{{Namespace: namespace, Name: "pod1"}},
+		},
 		{
 			name: "deleted pod",
 			existingPods: []*v1.Pod{
@@ -490,49 +591,63 @@ func pvcBoundToPV(pvName, pvcName string) *v1.PersistentVolumeClaim {
 	return pvc
 }
 
-func pod(podName, level string, changePolicy *v1.PodSELinuxChangePolicy) *v1.Pod {
+type podBuilder struct {
+	pod *v1.Pod
+}
+
+func pod(podName, level string, changePolicy *v1.PodSELinuxChangePolicy) *podBuilder {
 	var opts *v1.SELinuxOptions
 	if level != "" {
 		opts = &v1.SELinuxOptions{
 			Level: level,
 		}
 	}
-	return &v1.Pod{
-		ObjectMeta: metav1.ObjectMeta{
-			Namespace: "ns1",
-			Name:      podName,
-		},
-		Spec: v1.PodSpec{
-			Containers: []v1.Container{
-				{
-					Name:  "container1",
-					Image: "image1",
-					VolumeMounts: []v1.VolumeMount{
-						{
-							Name:      "vol1",
-							MountPath: "/mnt",
+	return &podBuilder{
+		pod: &v1.Pod{
+			ObjectMeta: metav1.ObjectMeta{
+				Namespace: "ns1",
+				Name:      podName,
+			},
+			Spec: v1.PodSpec{
+				Containers: []v1.Container{
+					{
+						Name:  "container1",
+						Image: "image1",
+						VolumeMounts: []v1.VolumeMount{
+							{
+								Name:      "vol1",
+								MountPath: "/mnt",
+							},
 						},
 					},
-				},
-				SecurityContext: &v1.PodSecurityContext{
-					SELinuxChangePolicy: changePolicy,
-					SELinuxOptions:      opts,
-				},
-				Volumes: []v1.Volume{
-					{
-						Name: "emptyDir1",
-						VolumeSource: v1.VolumeSource{
-							EmptyDir: &v1.EmptyDirVolumeSource{},
+				SecurityContext: &v1.PodSecurityContext{
+					SELinuxChangePolicy: changePolicy,
+					SELinuxOptions:      opts,
+				},
+				Volumes: []v1.Volume{
+					{
+						Name: "emptyDir1",
+						VolumeSource: v1.VolumeSource{
+							EmptyDir: &v1.EmptyDirVolumeSource{},
+						},
 					},
 				},
 			},
+			Status: v1.PodStatus{
+				Phase: v1.PodRunning,
+			},
 		},
 	}
 }
 
-func addInlineVolume(pod *v1.Pod) *v1.Pod {
-	pod.Spec.Volumes = append(pod.Spec.Volumes, v1.Volume{
+func (b *podBuilder) withPhase(phase v1.PodPhase) *podBuilder {
+	b.pod.Status.Phase = phase
+	return b
+}
+
+func (b *podBuilder) withInlineVolume() *podBuilder {
+	b.pod.Spec.Volumes = append(b.pod.Spec.Volumes, v1.Volume{
 		Name: "inlineVolume",
 		VolumeSource: v1.VolumeSource{
 			AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
@@ -540,17 +655,15 @@ func addInlineVolume(pod *v1.Pod) *v1.Pod {
 			},
 		},
 	})
-	pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
+	b.pod.Spec.Containers[0].VolumeMounts = append(b.pod.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
 		Name:      "inlineVolume",
 		MountPath: "/mnt",
 	})
-
-	return pod
+	return b
 }
 
-func podWithPVC(podName, label string, changePolicy *v1.PodSELinuxChangePolicy, pvcName, volumeName string) *v1.Pod {
-	pod := pod(podName, label, changePolicy)
-	pod.Spec.Volumes = append(pod.Spec.Volumes, v1.Volume{
+func (b *podBuilder) withPVC(pvcName, volumeName string) *podBuilder {
+	b.pod.Spec.Volumes = append(b.pod.Spec.Volumes, v1.Volume{
 		Name: volumeName,
 		VolumeSource: v1.VolumeSource{
 			PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
@@ -558,11 +671,15 @@ func podWithPVC(podName, label string, changePolicy *v1.PodSELinuxChangePolicy,
 			},
 		},
 	})
-	pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
+	b.pod.Spec.Containers[0].VolumeMounts = append(b.pod.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
 		Name:      volumeName,
 		MountPath: "/mnt",
 	})
-	return pod
+	return b
+}
+
+func (b *podBuilder) build() *v1.Pod {
+	return b.pod
 }
 
 type addedVolume struct {
diff --git a/test/e2e/storage/csimock/base.go b/test/e2e/storage/csimock/base.go
index 4c51a11a319dc..86c75e48507ef 100644
--- a/test/e2e/storage/csimock/base.go
+++ b/test/e2e/storage/csimock/base.go
@@ -149,6 +149,7 @@ const (
 var (
 	errPodCompleted   = fmt.Errorf("pod ran to completion")
 	errNotEnoughSpace = errors.New(errReasonNotEnoughSpace)
+	sleepCommand      = []string{"sleep", "infinity"}
 )
 
 func newMockDriverSetup(f *framework.Framework) *mockDriverSetup {
@@ -474,7 +475,15 @@ func (m *mockDriverSetup) createPodWithFSGroup(ctx context.Context, fsGroup *int
 	return class, claim, pod
 }
 
-func (m *mockDriverSetup) createPodWithSELinux(ctx context.Context, accessModes []v1.PersistentVolumeAccessMode, mountOptions []string, seLinuxOpts *v1.SELinuxOptions, policy *v1.PodSELinuxChangePolicy, privileged bool) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
+func (m *mockDriverSetup) createPodWithSELinux(
+	ctx context.Context,
+	accessModes []v1.PersistentVolumeAccessMode,
+	mountOptions []string,
+	seLinuxOpts *v1.SELinuxOptions,
+	policy *v1.PodSELinuxChangePolicy,
+	privileged bool,
+	command []string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
+	ginkgo.By("Creating pod with SELinux context")
 	f := m.f
 	nodeSelection := m.config.ClientNodeSelection
 
@@ -491,7 +500,7 @@ func (m *mockDriverSetup) createPodWithSELinux(ctx context.Context, accessModes
 		ReclaimPolicy: m.tp.reclaimPolicy,
 	}
 	class, claim := createClaim(ctx, f.ClientSet, scTest, nodeSelection, m.tp.scName, f.Namespace.Name, accessModes)
-	pod, err := startPausePodWithSELinuxOptions(f.ClientSet, claim, nodeSelection, f.Namespace.Name, seLinuxOpts, policy, privileged)
+	pod, err := startPausePodWithSELinuxOptions(f.ClientSet, claim, nodeSelection, f.Namespace.Name, seLinuxOpts, policy, privileged, command)
 	framework.ExpectNoError(err, "Failed to create pause pod with SELinux context %s: %v", seLinuxOpts, err)
 
 	if class != nil {
@@ -866,7 +875,19 @@ func startBusyBoxPodWithVolumeSource(cs clientset.Interface, volumeSource v1.Vol
 	return cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
 }
 
-func startPausePodWithSELinuxOptions(cs clientset.Interface, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection, ns string, seLinuxOpts *v1.SELinuxOptions, policy *v1.PodSELinuxChangePolicy, privileged bool) (*v1.Pod, error) {
+func startPausePodWithSELinuxOptions(
+	cs clientset.Interface,
+	pvc *v1.PersistentVolumeClaim,
+	node e2epod.NodeSelection,
+	ns string,
+	seLinuxOpts *v1.SELinuxOptions,
+	policy *v1.PodSELinuxChangePolicy,
+	privileged bool,
+	command []string) (*v1.Pod, error) {
+
+	if len(command) == 0 {
+		command = sleepCommand
+	}
 	pod := &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			GenerateName: "pvc-volume-tester-",
@@ -878,8 +899,9 @@ func startPausePodWithSELinuxOptions(cs clientset.Interface, pvc *v1.PersistentV
 			},
 			Containers: []v1.Container{
 				{
-					Name:  "volume-tester",
-					Image: imageutils.GetE2EImage(imageutils.Pause),
+					Name:    "volume-tester",
+					Image:   e2epod.GetDefaultTestImage(),
+					Command: command,
 					SecurityContext: &v1.SecurityContext{
 						Privileged: &privileged,
 					},
diff --git a/test/e2e/storage/csimock/csi_selinux_mount.go b/test/e2e/storage/csimock/csi_selinux_mount.go
index 3a7045cbaf905..2c6012035e6c6 100644
--- a/test/e2e/storage/csimock/csi_selinux_mount.go
+++ b/test/e2e/storage/csimock/csi_selinux_mount.go
@@ -298,7 +298,7 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount", func() {
 			// Act
 			ginkgo.By("Starting the initial pod")
 			accessModes := []v1.PersistentVolumeAccessMode{t.volumeMode}
-			_, claim, pod := m.createPodWithSELinux(ctx, accessModes, t.mountOptions, t.firstPodSELinuxOpts, t.firstPodChangePolicy, false /* privileged */)
+			_, claim, pod := m.createPodWithSELinux(ctx, accessModes, t.mountOptions, t.firstPodSELinuxOpts, t.firstPodChangePolicy, false /* privileged */, sleepCommand)
 			err := e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace)
 			framework.ExpectNoError(err, "starting the initial pod")
 
@@ -331,7 +331,15 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount", func() {
 			pod, err = m.cs.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
 			framework.ExpectNoError(err, "getting the initial pod")
 			nodeSelection := e2epod.NodeSelection{Name: pod.Spec.NodeName}
-			pod2, err := startPausePodWithSELinuxOptions(f.ClientSet, claim, nodeSelection, f.Namespace.Name, t.secondPodSELinuxOpts, t.secondPodChangePolicy, false /* privileged */)
+			pod2, err := startPausePodWithSELinuxOptions(
+				f.ClientSet,
+				claim,
+				nodeSelection,
+				f.Namespace.Name,
+				t.secondPodSELinuxOpts,
+				t.secondPodChangePolicy,
+				false, /* privileged */
+				sleepCommand)
 			framework.ExpectNoError(err, "creating second pod with SELinux context %s", t.secondPodSELinuxOpts)
 			m.pods = append(m.pods, pod2)
 
@@ -454,6 +462,7 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount metrics and SELinuxWarningC
 		firstPodSELinuxOpts   *v1.SELinuxOptions
 		firstPodChangePolicy  *v1.PodSELinuxChangePolicy
 		firstPodPrivileged    bool
+		firstPodTargetPhase   v1.PodPhase // Phase the first pod should reach before the second pod is created. Empty value means Running.
 		secondPodSELinuxOpts  *v1.SELinuxOptions
 		secondPodChangePolicy *v1.PodSELinuxChangePolicy
 		secondPodPrivileged   bool
@@ -719,6 +728,30 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount metrics and SELinuxWarningC
 			expectControllerConflictProperty: "SELinuxLabel",
 			testTags:                         []interface{}{framework.WithFeatureGate(features.SELinuxMount)},
 		},
+		{
+			name:                    "error is not bumped on a finished Pod with a different context on RWO volume and SELinuxMount enabled",
+			csiDriverSELinuxEnabled: true,
+			firstPodSELinuxOpts:     &seLinuxOpts1,
+			firstPodTargetPhase:     v1.PodSucceeded,
+			secondPodSELinuxOpts:    &seLinuxOpts2,
+			volumeMode:              v1.ReadWriteOnce,
+			waitForSecondPodStart:   true,
+			// The volume is unmounted between the first and the second Pod, so admitted_total increases.
+			expectNodeIncreases: sets.New[string]("volume_manager_selinux_volumes_admitted_total"),
+			testTags:            []interface{}{framework.WithFeatureGate(features.SELinuxMount)},
+		},
+		{
+			name:                    "error is not bumped on a failed Pod with a different context on RWO volume and SELinuxMount enabled",
+			csiDriverSELinuxEnabled: true,
+			firstPodSELinuxOpts:     &seLinuxOpts1,
+			firstPodTargetPhase:     v1.PodFailed,
+			secondPodSELinuxOpts:    &seLinuxOpts2,
+			volumeMode:              v1.ReadWriteOnce,
+			waitForSecondPodStart:   true,
+			// The volume is unmounted between the first and the second Pod, so admitted_total increases.
+			expectNodeIncreases: sets.New[string]("volume_manager_selinux_volumes_admitted_total"),
+			testTags:            []interface{}{framework.WithFeatureGate(features.SELinuxMount)},
+		},
 	}
 	for _, t := range tests {
 		t := t
@@ -726,6 +759,9 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount metrics and SELinuxWarningC
 			if processLabel == "" {
 				e2eskipper.Skipf("SELinux tests are supported only on %+v", getSupportedSELinuxDistros())
 			}
+			if t.firstPodTargetPhase == "" {
+				t.firstPodTargetPhase = v1.PodRunning
+			}
 			// Some metrics use CSI driver name as a label, which is "csi-mock-" + the namespace name.
 			volumePluginLabel := "volume_plugin=\"kubernetes.io/csi/csi-mock-" + f.Namespace.Name + "\""
 
@@ -744,9 +780,24 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount metrics and SELinuxWarningC
 
 			ginkgo.By("Starting the first pod")
 			accessModes := []v1.PersistentVolumeAccessMode{t.volumeMode}
-			_, claim, pod := m.createPodWithSELinux(ctx, accessModes, []string{}, t.firstPodSELinuxOpts, t.firstPodChangePolicy, t.firstPodPrivileged)
-			err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace)
-			framework.ExpectNoError(err, "starting the initial pod")
+			command := sleepCommand
+			switch t.firstPodTargetPhase {
+			case v1.PodSucceeded:
+				command = []string{"/bin/true"}
+			case v1.PodFailed:
+				command = []string{"/bin/false"}
+			}
+			_, claim, pod := m.createPodWithSELinux(ctx, accessModes, []string{}, t.firstPodSELinuxOpts, t.firstPodChangePolicy, t.firstPodPrivileged, command)
+
+			switch t.firstPodTargetPhase {
+			case v1.PodRunning:
+				err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace)
+				framework.ExpectNoError(err, "starting the initial pod")
+			case v1.PodSucceeded, v1.PodFailed:
+				ginkgo.By("Waiting for the first pod to complete")
+				err = e2epod.WaitForPodNoLongerRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace)
+				framework.ExpectNoError(err, "starting and completing the initial pod")
+			}
 
 			ginkgo.By("Grabbing initial metrics")
 			pod, err = m.cs.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
@@ -759,7 +810,15 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount metrics and SELinuxWarningC
 			ginkgo.By("Starting the second pod")
 			// Skip scheduler, it would block scheduling the second pod with ReadWriteOncePod PV.
 			nodeSelection := e2epod.NodeSelection{Name: pod.Spec.NodeName}
-			pod2, err := startPausePodWithSELinuxOptions(f.ClientSet, claim, nodeSelection, f.Namespace.Name, t.secondPodSELinuxOpts, t.secondPodChangePolicy, t.secondPodPrivileged)
+			pod2, err := startPausePodWithSELinuxOptions(
+				f.ClientSet,
+				claim,
+				nodeSelection,
+				f.Namespace.Name,
+				t.secondPodSELinuxOpts,
+				t.secondPodChangePolicy,
+				t.secondPodPrivileged,
+				sleepCommand)
 			framework.ExpectNoError(err, "creating second pod with SELinux context %s", t.secondPodSELinuxOpts)
 			m.pods = append(m.pods, pod2)
 
@@ -795,6 +854,9 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount metrics and SELinuxWarningC
 				// Check the controler generated event on the second pod
 				err = waitForConflictEvent(ctx, m.cs, pod2, pod, t.expectControllerConflictProperty, f.Timeouts.PodStart)
 				framework.ExpectNoError(err, "while waiting for an event on the second pod")
+			} else {
+				err := checkForNoConflictEvents(ctx, m.cs, pod, pod2)
+				framework.ExpectNoError(err, "ensuring there are no SELinux conflict events")
 			}
 		}
 		// t.testTags is array and it's not possible to use It("name", func(){xxx}, t.testTags...)
@@ -974,6 +1036,29 @@ func waitForConflictEvent(ctx context.Context, cs clientset.Interface, pod, othe
 	return e2eevents.WaitTimeoutForEvent(ctx, cs, pod.Namespace, eventSelector, msg, timeout)
 }
 
+func checkForNoConflictEvents(ctx context.Context, cs clientset.Interface, pod, otherPod *v1.Pod) error {
+	eventSelector := fields.Set{
+		"involvedObject.kind":      "Pod",
+		"involvedObject.name":      pod.Name,
+		"involvedObject.namespace": pod.Namespace,
+	}.AsSelector().String()
+	options := metav1.ListOptions{FieldSelector: eventSelector}
+
+	events, err := cs.CoreV1().Events(pod.Namespace).List(ctx, options)
+	if err != nil {
+		return fmt.Errorf("error getting events: %w", err)
+	}
+
+	msg := fmt.Sprintf("conflicts with pod %s that uses the same volume as this pod", otherPod.Name)
+	ginkgo.By(fmt.Sprintf("Checking for the SELinux controller events on pod %q: %q", pod.Name, msg))
+	for _, event := range events.Items {
+		if strings.Contains(event.Message, msg) {
+			return fmt.Errorf("conflict event found: %s", event.Message)
+		}
+	}
+	return nil
+}
+
 func dumpMetrics(metrics map[string]float64) {
 	// Print the metrics sorted by metric name for better readability
 	keys := make([]string, 0, len(metrics))