diff --git a/openshift-hack/cmd/k8s-tests-ext/disabled_tests.go b/openshift-hack/cmd/k8s-tests-ext/disabled_tests.go
index 87f900f1a5730..848d6cf179032 100644
--- a/openshift-hack/cmd/k8s-tests-ext/disabled_tests.go
+++ b/openshift-hack/cmd/k8s-tests-ext/disabled_tests.go
@@ -167,9 +167,6 @@ func filterOutDisabledSpecs(specs et.ExtensionTestSpecs) et.ExtensionTestSpecs {
 		},
 		// tests that need to be temporarily disabled while the rebase is in progress.
 		"RebaseInProgress": {
-			// https://issues.redhat.com/browse/OCPBUGS-61515
-			"[sig-scheduling] SchedulerPreemption [Serial] validates various priority Pods preempt expectedly with the async preemption [Feature:SchedulerAsyncPreemption] [FeatureGate:SchedulerAsyncPreemption] [Beta]",
-
 			// https://issues.redhat.com/browse/OCPBUGS-61378
 			"[sig-network] Conntrack should be able to cleanup conntrack entries when UDP service target port changes for a NodePort service",
 
diff --git a/test/e2e/scheduling/preemption.go b/test/e2e/scheduling/preemption.go
index a4e3129c33a3f..04b6082265f88 100644
--- a/test/e2e/scheduling/preemption.go
+++ b/test/e2e/scheduling/preemption.go
@@ -373,7 +373,7 @@ var _ = SIGDescribe("SchedulerPreemption", framework.WithSerial(), func() {
 		highPriorityPods := make([]*v1.Pod, 0, 5*nodeListLen)
 		mediumPriorityPods := make([]*v1.Pod, 0, 10*nodeListLen)
 
-		ginkgo.By("Run high/medium priority pods that have same requirements as that of lower priority pod")
+		ginkgo.By("Run medium priority pods that have same requirements as that of lower priority pod")
 		for i := range nodeList.Items {
 			// Create medium priority pods first
 			// to confirm the scheduler finally prioritize the high priority pods, ignoring the medium priority pods.
@@ -391,19 +391,6 @@ var _ = SIGDescribe("SchedulerPreemption", framework.WithSerial(), func() {
 				})
 				mediumPriorityPods = append(mediumPriorityPods, p)
 			}
-
-			for j := 0; j < 5; j++ {
-				p := createPausePod(ctx, f, pausePodConfig{
-					Name:              fmt.Sprintf("pod%d-%d-%v", i, j, highPriorityClassName),
-					PriorityClassName: highPriorityClassName,
-					Resources: &v1.ResourceRequirements{
-						// Set the pod request to the low priority pod's resources
-						Requests: lowPriorityPods[0].Spec.Containers[0].Resources.Requests,
-						Limits:   lowPriorityPods[0].Spec.Containers[0].Resources.Requests,
-					},
-				})
-				highPriorityPods = append(highPriorityPods, p)
-			}
 		}
 
 		// All low priority Pods should be the target of preemption.
@@ -419,6 +406,22 @@ var _ = SIGDescribe("SchedulerPreemption", framework.WithSerial(), func() {
 			}))
 		}
 
+		ginkgo.By("Run high priority pods that have same requirements as that of lower priority pod")
+		for i := range nodeList.Items {
+			for j := 0; j < 5; j++ {
+				p := createPausePod(ctx, f, pausePodConfig{
+					Name:              fmt.Sprintf("pod%d-%d-%v", i, j, highPriorityClassName),
+					PriorityClassName: highPriorityClassName,
+					Resources: &v1.ResourceRequirements{
+						// Set the pod request to the low priority pod's resources
+						Requests: lowPriorityPods[0].Spec.Containers[0].Resources.Requests,
+						Limits:   lowPriorityPods[0].Spec.Containers[0].Resources.Requests,
+					},
+				})
+				highPriorityPods = append(highPriorityPods, p)
+			}
+		}
+
 		// All high priority Pods should be schedulable by removing the low priority Pods.
 		ginkgo.By("Wait for high priority pods to be ready for the preemption.")
 		for _, pod := range highPriorityPods {