
Commit 3130a4f

MCO-2006: Migrate pinned images sets private test cases

1 parent 9e2ecbb, commit 3130a4f
17 files changed: +1471 / -0 lines

cmd/machine-config-tests-ext/main.go
Lines changed: 12 additions & 0 deletions

@@ -72,6 +72,18 @@ func main() {
 		},
 		ClusterStability: e.ClusterStabilityDisruptive,
 		TestTimeout:      &defaultTimeout,
+		Description:      "Suite that daily sends signals to component readiness",
+	})
+
+	// This suite will include all tests not included in the previous suite inheriting from openshift/disruptive
+	ext.AddGlobalSuite(e.Suite{
+		Name: "openshift/machine-config-operator/longduration",
+		Qualifiers: []string{
+			`name.contains("[Suite:openshift/machine-config-operator/longduration]")`,
+		},
+		ClusterStability: e.ClusterStabilityDisruptive,
+		TestTimeout:      &defaultTimeout,
+		Description:      "A long-running, resource-intensive test suite executed on a scheduled basis to provide deep validation beyond the standard executions",
 	})

 	// If using Ginkgo, build test specs automatically
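For context, the Qualifiers expression selects tests by name, so a spec opts into the new longduration suite simply by carrying the suite tag somewhere in its Ginkgo description. A minimal, hypothetical sketch (not part of this commit), assuming the usual g alias for the ginkgo package and an illustrative "[sig-mco]" prefix:

// Hypothetical example: any spec whose full name contains the tag below
// is matched by the name.contains(...) qualifier of the longduration suite.
var _ = g.Describe("[sig-mco] pinned image sets", func() {
	g.It("pins large images on every node [Suite:openshift/machine-config-operator/longduration]", func() {
		// long-running, resource-intensive validation goes here
	})
})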

test/extended-priv/const.go
Lines changed: 5 additions & 0 deletions

@@ -75,4 +75,9 @@ const (
 	MachineSetResource = "machinesets"
 	// ControlPlaneMachineSetResource is the resource name for controlplanemachinesets
 	ControlPlaneMachineSetResource = "controlplanemachinesets"
+
+	// BusyBoxImage the multiplatform busybox image stored in openshifttest
+	BusyBoxImage = "quay.io/openshifttest/busybox@sha256:c5439d7db88ab5423999530349d327b04279ad3161d7596d2126dfb5b02bfd1f"
+	// AlpineImage the multiplatform alpine image stored in openshifttest
+	AlpineImage = "quay.io/openshifttest/alpine@sha256:dc1536cbff0ba235d4219462aeccd4caceab9def96ae8064257d049166890083"
 )

test/extended-priv/controller.go
Lines changed: 16 additions & 0 deletions

@@ -130,3 +130,19 @@ func (mcc *Controller) HasAcquiredLease() (bool, error) {

 	return strings.Contains(podAllLogs, "successfully acquired lease"), nil
 }
+
+// GetNode return the node where the machine controller is running
+func (mcc *Controller) GetNode() (*Node, error) {
+	controllerPodName, err := mcc.GetCachedPodName()
+	if err != nil {
+		return nil, err
+	}
+
+	controllerPod := NewNamespacedResource(mcc.oc, "pod", MachineConfigNamespace, controllerPodName)
+	nodeName, err := controllerPod.Get(`{.spec.nodeName}`)
+	if err != nil {
+		return nil, err
+	}
+
+	return NewNode(mcc.oc, nodeName), nil
+}
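As a brief, hypothetical illustration (not in this commit) of the new helper, assuming mcc is an existing *Controller value inside package extended:

// Hypothetical usage: locate the node currently hosting the machine-config-controller pod.
controllerNode, err := mcc.GetNode()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the node running the machine-config-controller pod")
logger.Infof("machine-config-controller is running on node %s", controllerNode.GetName())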

test/extended-priv/gomega_matchers.go
Lines changed: 5 additions & 0 deletions

@@ -152,3 +152,8 @@ func (matcher *AvailableMatcher) NegatedFailureMessage(actual interface{}) (mess

 	return message
 }
+
+// BeAvailable returns the gomega matcher to check if a resource is available or not.
+func BeAvailable() types.GomegaMatcher {
+	return &DegradedMatcher{&conditionMatcher{conditionType: "Available", field: "status", expected: "True"}}
+}
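As a hedged illustration (not part of this commit), the new matcher plugs into the same Eventually-on-a-Resource pattern used elsewhere in this test package; the "clusteroperator" kind string, the "machine-config" resource name, and the timeouts below are assumptions for the sketch:

// Hypothetical usage: wait until the machine-config ClusterOperator reports Available=True.
machineConfigCO := NewResource(oc, "clusteroperator", "machine-config")
o.Eventually(machineConfigCO, "5m", "20s").Should(BeAvailable(),
	"The machine-config ClusterOperator should report the Available condition with status True")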
Lines changed: 73 additions & 0 deletions

@@ -0,0 +1,73 @@
+package extended
+
+import (
+	"fmt"
+
+	o "github.com/onsi/gomega"
+
+	exutil "github.com/openshift/machine-config-operator/test/extended-priv/util"
+	logger "github.com/openshift/machine-config-operator/test/extended-priv/util/logext"
+)
+
+// KubeletConfig struct is used to handle KubeletConfig resources in OCP
+type KubeletConfig struct {
+	Resource
+	template string
+}
+
+// KubeletConfigList handles a list of KubeletConfig resources
+type KubeletConfigList struct {
+	ResourceList
+}
+
+// NewKubeletConfig create a NewKubeletConfig struct
+func NewKubeletConfig(oc *exutil.CLI, name, template string) *KubeletConfig {
+	return &KubeletConfig{Resource: *NewResource(oc, "KubeletConfig", name), template: template}
+}
+
+// NewKubeletConfigList create a NewKubeletConfigList struct
+func NewKubeletConfigList(oc *exutil.CLI) *KubeletConfigList {
+	return &KubeletConfigList{*NewResourceList(oc, "KubeletConfig")}
+}
+
+func (kc *KubeletConfig) create(parameters ...string) {
+	allParams := []string{"--ignore-unknown-parameters=true", "-f", kc.template,
+		"-p", "NAME=" + kc.name}
+	allParams = append(allParams, parameters...)
+	exutil.CreateClusterResourceFromTemplate(kc.oc, allParams...)
+}
+
+func (kc KubeletConfig) waitUntilSuccess(timeout string) {
+	logger.Infof("wait for %s to report success", kc.name)
+	o.EventuallyWithOffset(1, &kc, timeout, "2s").Should(o.SatisfyAll(
+		HaveConditionField("Success", "status", "True"),
+		HaveConditionField("Success", "message", "Success"),
+	), "KubeletConfig '%s' should report Success in status.conditions, but the current status is not success", kc.GetName())
+}
+
+func (kc KubeletConfig) waitUntilFailure(expectedMsg, timeout string) {
+	logger.Infof("wait for %s to report failure", kc.name)
+	o.EventuallyWithOffset(1, &kc, timeout, "2s").Should(o.SatisfyAll(
+		HaveConditionField("Failure", "status", "False"),
+		HaveConditionField("Failure", "message", o.ContainSubstring(expectedMsg)),
+	), "KubeletConfig '%s' should report Failure in status.conditions and report failure message %s. But it doesn't.", kc.GetName(), expectedMsg)
+}
+
+// GetGeneratedMCName returns the name of the MC that was generated by this KubeletConfig resource
+func (kc KubeletConfig) GetGeneratedMCName() (string, error) {
+	mcName, err := kc.Get(`{.metadata.finalizers[0]}`)
+	if err != nil {
+		return "", err
+	}
+	if mcName == "" {
+		return "", fmt.Errorf("It was not possible to get the finalizer from %s %s: %s", kc.GetKind(), kc.GetName(), kc.PrettyString())
+	}
+	return mcName, nil
+}
+
+// GetGeneratedMCNameOrFail returns the name of the MC that was generated by this KubeletConfig resource and fails the test case if it cannot be done
+func (kc KubeletConfig) GetGeneratedMCNameOrFail() string {
+	mcName, err := kc.GetGeneratedMCName()
+	o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the generated MC for %s %s", kc.GetKind(), kc.GetName())
+	return mcName
+}
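A hedged sketch (not part of the commit) of how a test in package extended might exercise these helpers, assuming an initialized exutil.CLI named oc; the template file name and timeout are illustrative:

// Hypothetical usage of the KubeletConfig helpers.
kc := NewKubeletConfig(oc, "change-maxpods-kubelet-config", "change-maxpods-kubelet-config.yaml") // template name is illustrative
kc.create()               // instantiate the KubeletConfig from the template
kc.waitUntilSuccess("5m") // wait for status.conditions to report Success
generatedMC := kc.GetGeneratedMCNameOrFail()
logger.Infof("KubeletConfig %s generated MachineConfig %s", kc.GetName(), generatedMC)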

test/extended-priv/machineconfignode.go
Lines changed: 15 additions & 0 deletions

@@ -13,3 +13,18 @@ type MachineConfigNode struct {
 func NewMachineConfigNode(oc *exutil.CLI, node string) *MachineConfigNode {
 	return &MachineConfigNode{Resource: *NewResource(oc, "machineconfignode", node)}
 }
+
+// IsPinnedImageSetsDegraded returns true if the PinnedImageSetsDegraded condition is true
+func (mcn *MachineConfigNode) IsPinnedImageSetsDegraded() bool {
+	return mcn.IsConditionStatusTrue("PinnedImageSetsDegraded")
+}
+
+// IsPinnedImageSetsProgressing returns true if the PinnedImageSetsProgressing condition is true
+func (mcn *MachineConfigNode) IsPinnedImageSetsProgressing() bool {
+	return mcn.IsConditionStatusTrue("PinnedImageSetsProgressing")
+}
+
+// GetPinnedImageSetLastFailedError returns the last failed generation error for pinned image sets
+func (mcn *MachineConfigNode) GetPinnedImageSetLastFailedError() string {
+	return mcn.GetOrFail(`{.status.pinnedImageSets[*].lastFailedGenerationError}`)
+}
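A hedged sketch (not in the commit) of how these accessors might be used from a test, following the same node-to-MachineConfigNode pattern used in machineconfigpool.go below; the node name is a placeholder:

// Hypothetical usage: inspect the pinned-image conditions reported by a node's MachineConfigNode.
node := NewNode(oc, "worker-0") // node name is illustrative
mcn := node.GetMachineConfigNode()
if mcn.IsPinnedImageSetsDegraded() {
	logger.Infof("Pinned image sets are degraded on %s: %s",
		node.GetName(), mcn.GetPinnedImageSetLastFailedError())
}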

test/extended-priv/machineconfigpool.go
Lines changed: 195 additions & 0 deletions

@@ -12,6 +12,7 @@ import (
 	o "github.com/onsi/gomega"
 	exutil "github.com/openshift/machine-config-operator/test/extended-priv/util"
 	logger "github.com/openshift/machine-config-operator/test/extended-priv/util/logext"
+	"github.com/tidwall/gjson"
 	"k8s.io/apimachinery/pkg/util/wait"
 	e2e "k8s.io/kubernetes/test/e2e/framework"
 )
@@ -848,3 +849,197 @@ func DebugDegradedStatus(mcp *MachineConfigPool) {
 	logger.Infof("Last %d lines of MCC:\n%s", maxMCCLines, GetLastNLines(mccLogs, maxMCCLines))
 	logger.Infof("END DEBUG")
 }
+
+// DeleteCustomMCP deletes a custom MCP and removes its labels from all nodes
+func DeleteCustomMCP(oc *exutil.CLI, name string) error {
+	mcp := NewMachineConfigPool(oc, name)
+	if !mcp.Exists() {
+		logger.Infof("MCP %s does not exist. No need to remove it", mcp.GetName())
+		return nil
+	}
+
+	exutil.By(fmt.Sprintf("Removing custom MCP %s", name))
+
+	nodes, err := mcp.GetNodes()
+	if err != nil {
+		logger.Errorf("Could not get the nodes that belong to MCP %s: %s", mcp.GetName(), err)
+		return err
+	}
+
+	label := fmt.Sprintf("node-role.kubernetes.io/%s", mcp.GetName())
+	for _, node := range nodes {
+		logger.Infof("Removing pool label from node %s", node.GetName())
+		err := node.RemoveLabel(label)
+		if err != nil {
+			logger.Errorf("Could not remove the role label from node %s: %s", node.GetName(), err)
+			return err
+		}
+	}
+
+	for _, node := range nodes {
+		err := node.WaitForLabelRemoved(label)
+		if err != nil {
+			logger.Errorf("The label %s was not removed from node %s", label, node.GetName())
+		}
+	}
+
+	err = mcp.WaitForMachineCount(0, 5*time.Minute)
+	if err != nil {
+		logger.Errorf("The %s MCP still contains nodes, it cannot be deleted: %s", mcp.GetName(), err)
+		return err
+	}
+
+	// Wait for worker MCP to be updated before removing the custom pool
+	// in order to make sure that no node has any annotation pointing to resources that depend on the custom pool that we want to delete
+	wMcp := NewMachineConfigPool(oc, MachineConfigPoolWorker)
+	wMcp.waitForComplete()
+
+	err = mcp.Delete()
+	if err != nil {
+		logger.Errorf("Could not delete %s MCP", mcp.GetName())
+		return err
+	}
+
+	return nil
+}
+
+// GetPoolSynchronizersStatusByType returns the pool synchronizer status for a given type
+func (mcp *MachineConfigPool) GetPoolSynchronizersStatusByType(pType string) (string, error) {
+	return mcp.Get(`{.status.poolSynchronizersStatus[?(@.poolSynchronizerType=="` + pType + `")]}`)
+}
+
+// IsPinnedImagesComplete returns if the MCP is reporting that there is no pinnedimages operation in progress
+func (mcp *MachineConfigPool) IsPinnedImagesComplete() (bool, error) {
+	pinnedStatus, err := mcp.GetPoolSynchronizersStatusByType("PinnedImageSets")
+	if err != nil {
+		return false, err
+	}
+
+	logger.Infof("Pinned status: %s", pinnedStatus)
+
+	mcpMachineCount, err := mcp.Get(`{.status.machineCount}`)
+	if err != nil {
+		return false, err
+	}
+
+	if mcpMachineCount == "" {
+		return false, fmt.Errorf("status.machineCount is empty in mcp %s", mcp.GetName())
+	}
+
+	pinnedMachineCount := gjson.Get(pinnedStatus, "machineCount").String()
+	if pinnedMachineCount == "" {
+		return false, fmt.Errorf("pinned status machineCount is empty in mcp %s", mcp.GetName())
+	}
+
+	pinnedUnavailableMachineCount := gjson.Get(pinnedStatus, "unavailableMachineCount").String()
+	if pinnedUnavailableMachineCount == "" {
+		return false, fmt.Errorf("pinned status unavailableMachineCount is empty in mcp %s", mcp.GetName())
+	}
+
+	updatedMachineCount := gjson.Get(pinnedStatus, "updatedMachineCount").String()
+	if updatedMachineCount == "" {
+		return false, fmt.Errorf("pinned status updatedMachineCount is empty in mcp %s", mcp.GetName())
+	}
+
+	return mcpMachineCount == pinnedMachineCount && updatedMachineCount == pinnedMachineCount && pinnedUnavailableMachineCount == "0", nil
+}
+
+// allNodesReportingPinnedSuccess returns true if all nodes in the pool are reporting pinned images success
+func (mcp *MachineConfigPool) allNodesReportingPinnedSuccess() (bool, error) {
+	allNodes, err := mcp.GetNodes()
+	if err != nil {
+		return false, err
+	}
+
+	if len(allNodes) == 0 {
+		logger.Infof("Warning, pool %s has no nodes!! We consider all nodes as correctly pinned", mcp.GetName())
+	}
+
+	for _, node := range allNodes {
+		nodeMCN := node.GetMachineConfigNode()
+		if nodeMCN.IsPinnedImageSetsDegraded() {
+			logger.Infof("Node %s is pinned degraded. Condition:\n%s", node.GetName(), nodeMCN.GetConditionByType("PinnedImageSetsDegraded"))
+			return false, nil
+		}

+		if nodeMCN.IsPinnedImageSetsProgressing() {
+			return false, nil
+		}
+	}
+
+	return true, nil
+}
+
+// waitForPinComplete waits for the MCP to complete pinning images
+func (mcp *MachineConfigPool) waitForPinComplete(timeToWait time.Duration) error {
+	logger.Infof("Waiting %s for MCP %s to complete pinned images.", timeToWait, mcp.name)
+
+	immediate := false
+	err := wait.PollUntilContextTimeout(context.TODO(), 1*time.Minute, timeToWait, immediate, func(_ context.Context) (bool, error) {
+		pinnedComplete, err := mcp.IsPinnedImagesComplete()
+		if err != nil {
+			logger.Infof("Error getting pinned complete: %s", err)
+			return false, err
+		}
+
+		if !pinnedComplete {
+			logger.Infof("Waiting for PinnedImageSets poolSynchronizersStatus to report success")
+			return false, nil
+		}
+
+		allNodesComplete, err := mcp.allNodesReportingPinnedSuccess()
+		if err != nil {
+			logger.Infof("Error getting if all nodes finished")
+			return false, err
+		}
+
+		if !allNodesComplete {
+			logger.Infof("Waiting for all nodes to report pinned images success")
+			return false, nil
+		}
+
+		logger.Infof("Pool %s successfully pinned the images! Complete!", mcp.GetName())
+		return true, nil
+	})
+
+	if err != nil {
+		logger.Infof("Pinned images operation is not completed on mcp %s", mcp.name)
+	}
+	return err
+}
+
+// GetPinnedImageSets returns the PinnedImageSet resources whose labels match this MCP's .spec.machineConfigSelector.matchLabels criteria
+func (mcp *MachineConfigPool) GetPinnedImageSets() ([]*PinnedImageSet, error) {
+	mcp.oc.NotShowInfo()
+	defer mcp.oc.SetShowInfo()
+
+	labelsString, err := mcp.Get(`{.spec.machineConfigSelector.matchLabels}`)
+	if err != nil {
+		return nil, err
+	}
+
+	if labelsString == "" {
+		return nil, fmt.Errorf("No machineConfigSelector found in %s", mcp)
+	}
+
+	labels := gjson.Parse(labelsString)
+
+	requiredLabel := ""
+	labels.ForEach(func(key, value gjson.Result) bool {
+		requiredLabel += fmt.Sprintf("%s=%s,", key.String(), value.String())
+		return true // keep iterating
+	})
+
+	if requiredLabel == "" {
+		return nil, fmt.Errorf("No labels matcher could be built for %s", mcp)
+	}
+	// remove the last comma
+	requiredLabel = strings.TrimSuffix(requiredLabel, ",")
+
+	pisList := NewPinnedImageSetList(mcp.oc)
+	pisList.ByLabel(requiredLabel)
+
+	return pisList.GetAll()
+}
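A hedged sketch (not part of the commit) of how a pinned-images test inside package extended might combine these helpers, assuming an initialized exutil.CLI named oc; the 30-minute timeout is illustrative:

// Hypothetical usage of the pinned-images pool helpers.
wMcp := NewMachineConfigPool(oc, MachineConfigPoolWorker)

// Wait for the pool to report that every node has pinned the requested images.
err := wMcp.waitForPinComplete(30 * time.Minute)
o.Expect(err).NotTo(o.HaveOccurred(), "The worker pool did not finish pinning the images")

// Inspect the PinnedImageSet resources that target this pool.
pinnedImageSets, err := wMcp.GetPinnedImageSets()
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the PinnedImageSets that target the worker pool")
logger.Infof("The worker pool is targeted by %d PinnedImageSet resources", len(pinnedImageSets))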
