@@ -887,8 +887,95 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() {
		})
	})
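+	// The runtime must exclude the CPUs pinned to a guaranteed pod from the
+	// cpuset it hands to every other container it creates.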
+ Context ("Check container runtimes cpu usage" , func () {
+		var guaranteedPod, bestEffortPod *corev1.Pod
+
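+		// Remove the aggregated spec dump from the node and delete both test pods after each spec.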
+		AfterEach(func() {
+			cmd := []string{"rm", "-f", "/rootfs/var/roothome/create"}
+			nodes.ExecCommand(ctx, workerRTNode, cmd)
+			deleteTestPod(ctx, guaranteedPod)
+			deleteTestPod(ctx, bestEffortPod)
+		})
+
+ It ("[test_id: 74461] Verify that runc excludes the cpus used by guaranteed pod" , func () {
901
+ guaranteedPod , err = getPod (ctx , workerRTNode , true )
902
+ err = testclient .Client .Create (ctx , guaranteedPod )
903
+ Expect (err ).ToNot (HaveOccurred ())
904
+ _ , err = pods .WaitForCondition (ctx , client .ObjectKeyFromObject (guaranteedPod ), corev1 .PodReady , corev1 .ConditionTrue , 5 * time .Minute )
905
+ Expect (err ).ToNot (HaveOccurred ())
906
+ Expect (guaranteedPod .Status .QOSClass ).To (Equal (corev1 .PodQOSGuaranteed ))
907
+
+			bestEffortPod, err = getPod(ctx, workerRTNode, false)
+			Expect(err).ToNot(HaveOccurred())
+			err = testclient.Client.Create(ctx, bestEffortPod)
+			Expect(err).ToNot(HaveOccurred())
+			_, err = pods.WaitForCondition(ctx, client.ObjectKeyFromObject(bestEffortPod), corev1.PodReady, corev1.ConditionTrue, 5*time.Minute)
+			Expect(err).ToNot(HaveOccurred())
+
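+			// Look up the container IDs of both "test" containers.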
+			containerIDs := make([]string, 2)
+
+			containerIDs[0], err = pods.GetContainerIDByName(guaranteedPod, "test")
+			Expect(err).ToNot(HaveOccurred())
+			containerIDs[1], err = pods.GetContainerIDByName(bestEffortPod, "test")
+			Expect(err).ToNot(HaveOccurred())
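+			// Append each container's generated OCI runtime spec (config.json) to a
+			// scratch file on the node so both specs can be read back in one go.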
+			for _, containerID := range containerIDs {
+				path := fmt.Sprintf("/rootfs/var/run/containers/storage/overlay-containers/%s/userdata/config.json", containerID)
+				cmd := []string{"/bin/bash", "-c", fmt.Sprintf("cat %s >> /rootfs/var/roothome/create", path)}
+				_, err = nodes.ExecCommand(ctx, workerRTNode, cmd)
+				Expect(err).ToNot(HaveOccurred())
+			}
+			cmd := []string{"cat", "/rootfs/var/roothome/create"}
+			output, err := nodes.ExecCommand(ctx, workerRTNode, cmd)
+			Expect(err).ToNot(HaveOccurred())
+
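+			// Extract the hostname and cpuset ("cpus") fields from the concatenated specs;
+			// the hostname ties each cpuset back to the pod it belongs to.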
+			out := testutils.ToString(output)
+			hostnameRe := regexp.MustCompile(`"hostname":\s+"([^"]+)"`)
+			cpusRe := regexp.MustCompile(`"cpus":\s+"([^"]+)"`)
+
+			hostnameMatches := hostnameRe.FindAllStringSubmatch(out, -1)
+			cpusMatches := cpusRe.FindAllStringSubmatch(out, -1)
+			Expect(hostnameMatches).ToNot(BeEmpty(), "Failed to extract hostname information")
+			Expect(cpusMatches).ToNot(BeEmpty(), "Failed to extract cpus information")
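+			// Collapse duplicate (hostname, cpus) pairs, preserving first-seen order.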
+			uniqueCombinations := make(map[string]struct{})
+			zippedMatches := make([]map[string]string, 0)
+			for i := 0; i < len(hostnameMatches) && i < len(cpusMatches); i++ {
+				combination := fmt.Sprintf("%s-%s", hostnameMatches[i][1], cpusMatches[i][1])
+				if _, exists := uniqueCombinations[combination]; !exists {
+					uniqueCombinations[combination] = struct{}{}
+					zippedMatches = append(zippedMatches, map[string]string{
+						"hostname": hostnameMatches[i][1],
+						"cpus":     cpusMatches[i][1],
+					})
+				}
+			}
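+			// The guaranteed pod's spec was appended first, so entry 0 holds its cpuset
+			// and entry 1 the cpuset runc gave the best-effort pod; they must not intersect.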
+			Expect(len(zippedMatches)).To(BeNumerically(">=", 2), "Expected at least two unique (hostname, cpus) pairs")
+			guaranteedPodCpus, err := cpuset.Parse(zippedMatches[0]["cpus"])
+			Expect(err).ToNot(HaveOccurred())
+			runcCpus, err := cpuset.Parse(zippedMatches[1]["cpus"])
+			Expect(err).ToNot(HaveOccurred())
+			overlapFound := !guaranteedPodCpus.Intersection(runcCpus).IsEmpty()
+			Expect(overlapFound).ToNot(BeTrue(), fmt.Sprintf("Overlap found between guaranteedPod cpus (%s) and runtime cpus (%s), not expected behaviour", guaranteedPodCpus, runcCpus))
+		})
+	})
+
})
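+// getPod builds, but does not create, a test pod pinned to the given node.
+// When guaranteed is true, CPU and memory limits are set so the pod is
+// admitted with the Guaranteed QoS class.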
+func getPod(ctx context.Context, workerRTNode *corev1.Node, guaranteed bool) (*corev1.Pod, error) {
+	testPod := pods.GetTestPod()
+	testPod.Namespace = testutils.NamespaceTesting
+	testPod.Spec.NodeSelector = map[string]string{testutils.LabelHostname: workerRTNode.Name}
+	if guaranteed {
+		testPod.Spec.Containers[0].Resources = corev1.ResourceRequirements{
+			Limits: corev1.ResourceList{
+				corev1.ResourceCPU:    resource.MustParse("2"),
+				corev1.ResourceMemory: resource.MustParse("200Mi"),
+			},
+		}
+	}
+	profile, err := profiles.GetByNodeLabels(testutils.NodeSelectorLabels)
+	if err != nil {
+		return nil, err
+	}
+	runtimeClass := components.GetComponentName(profile.Name, components.ComponentNamePrefix)
+	testPod.Spec.RuntimeClassName = &runtimeClass
+	return testPod, nil
+}
977
+
978
+
892
979
func checkForWorkloadPartitioning(ctx context.Context) bool {
	// Look for the correct Workload Partition annotation in
	// a crio configuration file on the target node