
Add some test cases (#14)
* Add some test cases

Signed-off-by: zeroalphat <[email protected]>

* Address review comments

Signed-off-by: zeroalphat <[email protected]>

---------

Signed-off-by: zeroalphat <[email protected]>
zeroalphat authored Apr 8, 2024
1 parent 2a51113 commit b09b87a
Showing 1 changed file with 214 additions and 12 deletions.
internal/controller/pod_controller_test.go
@@ -3,6 +3,8 @@ package controller
import (
"context"
"fmt"
"regexp"
"strings"

"github.com/cybozu-go/cat-gate/internal/constants"
. "github.com/onsi/ginkgo/v2"
@@ -27,7 +29,7 @@ var _ = Describe("CatGate controller", func() {
err := k8sClient.Create(ctx, namespace)
Expect(err).NotTo(HaveOccurred())

-createNewPod(0, testName)
createNewPod(testName, 0)

pod := &corev1.Pod{}
Eventually(func(g Gomega) {
@@ -48,7 +50,7 @@ var _ = Describe("CatGate controller", func() {
Expect(err).NotTo(HaveOccurred())

for i := 0; i < 8; i++ {
-createNewPod(i, testName)
createNewPod(testName, i)
}
pods := &corev1.PodList{}
Eventually(func(g Gomega) {
@@ -74,9 +76,11 @@ var _ = Describe("CatGate controller", func() {
err := k8sClient.Create(ctx, namespace)
Expect(err).NotTo(HaveOccurred())

-for i := 0; i < 8; i++ {
-createNewPod(i, testName)
for i := 0; i < 10; i++ {
createNewPod(testName, i)
createNewNode(testName, i)
}

pods := &corev1.PodList{}

Eventually(func(g Gomega) {
@@ -88,6 +92,7 @@ var _ = Describe("CatGate controller", func() {
numSchedulable += 1
}
}
// no pod is running yet, so 1 pod should be scheduled
g.Expect(numSchedulable).To(Equal(1))
}).Should(Succeed())
scheduleAndStartPods(testName)
@@ -101,6 +106,7 @@ var _ = Describe("CatGate controller", func() {
numSchedulable += 1
}
}
// 1 pod is already running, so 3 (1 + 1*2) pods should be scheduled
g.Expect(numSchedulable).To(Equal(3))
}).Should(Succeed())
scheduleAndStartPods(testName)
@@ -114,7 +120,8 @@ var _ = Describe("CatGate controller", func() {
numSchedulable += 1
}
}
-g.Expect(numSchedulable).To(Equal(8))
// 3 pods are already running, so 9 (3 + 3*2) pods should be scheduled
g.Expect(numSchedulable).To(Equal(9))
}).Should(Succeed())
})

@@ -128,7 +135,7 @@ var _ = Describe("CatGate controller", func() {
err := k8sClient.Create(ctx, namespace)
Expect(err).NotTo(HaveOccurred())

-createNewPod(0, testName)
createNewPod(testName, 0)

pod := &corev1.Pod{}
Eventually(func(g Gomega) {
@@ -137,7 +144,7 @@ var _ = Describe("CatGate controller", func() {
g.Expect(pod.Spec.SchedulingGates).NotTo(ConsistOf(corev1.PodSchedulingGate{Name: constants.PodSchedulingGateName}))
}).Should(Succeed())

-pod = createNewPod(1, testName)
pod = createNewPod(testName, 1)
delete(pod.Annotations, constants.CatGateImagesHashAnnotation)
err = k8sClient.Update(ctx, pod)
Expect(err).NotTo(HaveOccurred())
@@ -159,8 +166,9 @@ var _ = Describe("CatGate controller", func() {
err := k8sClient.Create(ctx, namespace)
Expect(err).NotTo(HaveOccurred())

-for i := 0; i < 8; i++ {
-createNewPod(i, testName)
for i := 0; i < 10; i++ {
createNewPod(testName, i)
createNewNode(testName, i)
}
pods := &corev1.PodList{}

@@ -173,6 +181,7 @@ var _ = Describe("CatGate controller", func() {
numSchedulable += 1
}
}
// no pod is running yet, so 1 pod should be scheduled
g.Expect(numSchedulable).To(Equal(1))
}).Should(Succeed())
scheduleAndStartPods(testName)
@@ -186,6 +195,7 @@ var _ = Describe("CatGate controller", func() {
numSchedulable += 1
}
}
// 1 pod is already running, so 3 (1 + 1*2) pods should be scheduled
g.Expect(numSchedulable).To(Equal(3))
}).Should(Succeed())
scheduleAndStartOnePod(testName)
@@ -199,14 +209,124 @@ var _ = Describe("CatGate controller", func() {
numSchedulable += 1
}
}
// 2 pods are already running, so 6 (2 + 2*2) pods should be scheduled
g.Expect(numSchedulable).To(Equal(6))
}).Should(Succeed())
})

-// TODO: add a test where one node has multiple running pods and only one additional pod would be scheduled.
It("should allow scheduling of additional pods when multiple pods are running on a single node", func() {
testName := "multiple-pods-running-on-single-node"
namespace := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: testName,
},
}
err := k8sClient.Create(ctx, namespace)
Expect(err).NotTo(HaveOccurred())

for i := 0; i < 3; i++ {
createNewPod(testName, i)
}
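// only one node is created, so every admitted pod in this test lands on the same node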
for i := 0; i < 1; i++ {
createNewNode(testName, i)
}

nodes := &corev1.NodeList{}
err = k8sClient.List(ctx, nodes, &client.ListOptions{Namespace: testName})
Expect(err).NotTo(HaveOccurred())

pods := &corev1.PodList{}
Eventually(func(g Gomega) {
err := k8sClient.List(ctx, pods, &client.ListOptions{Namespace: testName})
g.Expect(err).NotTo(HaveOccurred())
numSchedulable := 0
for _, pod := range pods.Items {
if !existsSchedulingGate(&pod) {
numSchedulable += 1
}
}
// no pod is running yet, so 1 pod should be scheduled
g.Expect(numSchedulable).To(Equal(1))
}).Should(Succeed())
scheduleSpecificNodeAndStartOnePod(testName, nodes.Items[0].Name)

Eventually(func(g Gomega) {
err := k8sClient.List(ctx, pods, &client.ListOptions{Namespace: testName})
g.Expect(err).NotTo(HaveOccurred())
numSchedulable := 0
for _, pod := range pods.Items {
if !existsSchedulingGate(&pod) {
numSchedulable += 1
}
}
// 1 pod is already running, so 3 (1 + 1*2) pods should be scheduled
g.Expect(numSchedulable).To(Equal(3))
}).Should(Succeed())
scheduleSpecificNodeAndStartOnePod(testName, nodes.Items[0].Name)

Eventually(func(g Gomega) {
err := k8sClient.List(ctx, pods, &client.ListOptions{Namespace: testName})
g.Expect(err).NotTo(HaveOccurred())
numSchedulable := 0
for _, pod := range pods.Items {
if !existsSchedulingGate(&pod) {
numSchedulable += 1
}
}
// 2 pods are already running on the same node, so only 3 pods should be scheduled
g.Expect(numSchedulable).To(Equal(3))
}).Should(Succeed())

})

It("Should the schedule not increase if the pod is not Running", func() {
testName := "crash-pod"
namespace := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: testName,
},
}
err := k8sClient.Create(ctx, namespace)
Expect(err).NotTo(HaveOccurred())

for i := 0; i < 3; i++ {
createNewPod(testName, i)
}
for i := 0; i < 3; i++ {
createNewNode(testName, i)
}

pods := &corev1.PodList{}
Eventually(func(g Gomega) {
err := k8sClient.List(ctx, pods, &client.ListOptions{Namespace: testName})
g.Expect(err).NotTo(HaveOccurred())
numSchedulable := 0
for _, pod := range pods.Items {
if !existsSchedulingGate(&pod) {
numSchedulable += 1
}
}
// no pod is running yet, so 1 pod should be scheduled
g.Expect(numSchedulable).To(Equal(1))
}).Should(Succeed())
scheduleAndStartOneUnhealthyPod(testName)

Eventually(func(g Gomega) {
err := k8sClient.List(ctx, pods, &client.ListOptions{Namespace: testName})
g.Expect(err).NotTo(HaveOccurred())
numSchedulable := 0
for _, pod := range pods.Items {
if !existsSchedulingGate(&pod) {
numSchedulable += 1
}
}
// 1 pod is already scheduled, but it is not Running, so still only 1 pod should be scheduled
g.Expect(numSchedulable).To(Equal(1))
}).Should(Succeed())
})
})

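// createNewPod creates a test pod in the testName namespace. Pod names appear
// to end in "pod-<index>"; updatePodStatus below relies on that suffix to
// derive the matching node name.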
-func createNewPod(index int, testName string) *corev1.Pod {
func createNewPod(testName string, index int) *corev1.Pod {
newPod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: testName,
@@ -232,6 +352,17 @@ func createNewPod(index int, testName string) *corev1.Pod {
return newPod
}

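// createNewNode registers a Node named "<testName>-node-<index>" so that admitted pods have a node to land on.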
func createNewNode(testName string, index int) {
newNode := &corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-node-%d", testName, index),
},
Status: corev1.NodeStatus{},
}
err := k8sClient.Create(ctx, newNode)
Expect(err).NotTo(HaveOccurred())
}

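// scheduleAndStartPods plays the role of kubelet for every ungated pod: it marks the pod Running and records the pod's images on its node.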
func scheduleAndStartPods(namespace string) {
pods := &corev1.PodList{}
err := k8sClient.List(ctx, pods, client.InNamespace(namespace))
@@ -240,6 +371,11 @@ func scheduleAndStartPods(namespace string) {
for _, pod := range pods.Items {
if !existsSchedulingGate(&pod) {
updatePodStatus(&pod, corev1.ContainerState{Running: &corev1.ContainerStateRunning{}})

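// in these tests pod.Status.HostIP stores the node name (see updatePodStatus), so it doubles as the Node object key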
node := &corev1.Node{}
err = k8sClient.Get(ctx, client.ObjectKey{Name: pod.Status.HostIP}, node)
Expect(err).NotTo(HaveOccurred())
updateNodeImageStatus(node, pod.Spec.Containers)
}
}
}
@@ -252,6 +388,46 @@ func scheduleAndStartOnePod(namespace string) {
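// like scheduleAndStartPods, but start exactly one ungated pod that has no container status yet, then stop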
for _, pod := range pods.Items {
if !existsSchedulingGate(&pod) && len(pod.Status.ContainerStatuses) == 0 {
updatePodStatus(&pod, corev1.ContainerState{Running: &corev1.ContainerStateRunning{}})

node := &corev1.Node{}
err = k8sClient.Get(ctx, client.ObjectKey{Name: pod.Status.HostIP}, node)
Expect(err).NotTo(HaveOccurred())
updateNodeImageStatus(node, pod.Spec.Containers)
break
}
}
}

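// scheduleSpecificNodeAndStartOnePod is like scheduleAndStartOnePod but pins the pod to the given node, so several running pods can accumulate on a single node.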
func scheduleSpecificNodeAndStartOnePod(namespace, nodeName string) {
pods := &corev1.PodList{}
err := k8sClient.List(ctx, pods, client.InNamespace(namespace))
Expect(err).NotTo(HaveOccurred())

for _, pod := range pods.Items {
if !existsSchedulingGate(&pod) && len(pod.Status.ContainerStatuses) == 0 {
updatePodStatusWithHostIP(&pod, corev1.ContainerState{Running: &corev1.ContainerStateRunning{}}, nodeName)
node := &corev1.Node{}
err = k8sClient.Get(ctx, client.ObjectKey{Name: pod.Status.HostIP}, node)
Expect(err).NotTo(HaveOccurred())
updateNodeImageStatus(node, pod.Spec.Containers)
break
}
}
}

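// scheduleAndStartOneUnhealthyPod admits one pod but leaves its container Waiting with reason RunContainerError, so the pod never counts as Running.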
func scheduleAndStartOneUnhealthyPod(namespace string) {
pods := &corev1.PodList{}
err := k8sClient.List(ctx, pods, client.InNamespace(namespace))
Expect(err).NotTo(HaveOccurred())

for _, pod := range pods.Items {
if !existsSchedulingGate(&pod) && len(pod.Status.ContainerStatuses) == 0 {
updatePodStatus(&pod, corev1.ContainerState{Waiting: &corev1.ContainerStateWaiting{Reason: "RunContainerError"}})

node := &corev1.Node{}
err = k8sClient.Get(ctx, client.ObjectKey{Name: pod.Status.HostIP}, node)
Expect(err).NotTo(HaveOccurred())
updateNodeImageStatus(node, pod.Spec.Containers)
break
}
}
@@ -263,7 +439,33 @@ func updatePodStatus(pod *corev1.Pod, state corev1.ContainerState) {
State: state,
},
}
-pod.Status.HostIP = pod.GetObjectMeta().GetName() + "-node"
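// derive the node name from the pod name: "<testName>-pod-<i>" becomes "<testName>-node-<i>"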
podName := pod.GetObjectMeta().GetName()
regex, err := regexp.Compile(`pod-\d+$`)
Expect(err).NotTo(HaveOccurred())
idx := regex.FindStringIndex(podName)
nodeName := podName[:idx[0]] + strings.Replace(podName[idx[0]:], "pod", "node", 1)
pod.Status.HostIP = nodeName
err = k8sClient.Status().Update(ctx, pod)
Expect(err).NotTo(HaveOccurred())
}

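// updatePodStatusWithHostIP is like updatePodStatus but records the given node name directly instead of deriving it from the pod name.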
func updatePodStatusWithHostIP(pod *corev1.Pod, state corev1.ContainerState, nodeName string) {
pod.Status.ContainerStatuses = []corev1.ContainerStatus{
{
State: state,
},
}
pod.Status.HostIP = nodeName
err := k8sClient.Status().Update(ctx, pod)
Expect(err).NotTo(HaveOccurred())
}

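// updateNodeImageStatus appends the pods' container images to the node's status, mimicking how kubelet reports images already pulled onto the node.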
func updateNodeImageStatus(node *corev1.Node, containers []corev1.Container) {
for _, container := range containers {
node.Status.Images = append(node.Status.Images, corev1.ContainerImage{
Names: []string{container.Image},
})
}
err := k8sClient.Status().Update(ctx, node)
Expect(err).NotTo(HaveOccurred())
}
