fix: ensure hub leader configmap is deleted with nodepool (#2324)
Co-authored-by: Simon Tien <[email protected]>
tnsimon and Simon Tien authored Feb 17, 2025
1 parent e5d69ba commit 9f0b43e
Showing 5 changed files with 217 additions and 33 deletions.
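For context: this commit replaces explicit ConfigMap deletion in the reconciler with Kubernetes owner references, so the garbage collector removes the leader ConfigMap together with its NodePool. A minimal sketch of the idea, assuming illustrative values for the API group/version and namespace (not copied verbatim from the commit):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

// newLeaderConfigMap builds a leader-hub ConfigMap and records the owning
// NodePool in OwnerReferences, so the Kubernetes garbage collector deletes
// the ConfigMap when the NodePool is deleted. APIVersion, Kind and the
// namespace used below are illustrative assumptions.
func newLeaderConfigMap(poolName string, poolUID types.UID, namespace string) *corev1.ConfigMap {
	return &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("leader-hub-%s", poolName),
			Namespace: namespace,
			OwnerReferences: []metav1.OwnerReference{
				{
					APIVersion: "apps.openyurt.io/v1beta2", // assumed group/version
					Kind:       "NodePool",
					Name:       poolName,
					UID:        poolUID,
				},
			},
		},
		Data: map[string]string{"leaders": ""},
	}
}

func main() {
	cm := newLeaderConfigMap("hangzhou", types.UID("0000-1111"), "kube-system")
	fmt.Println(cm.Name, "owned by", cm.OwnerReferences[0].Kind, cm.OwnerReferences[0].Name)
}

Because the ConfigMap is namespaced and the NodePool is cluster scoped, this direction of ownership is allowed, and no finalizer or explicit delete call is needed in the reconcile path.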
@@ -82,7 +82,7 @@ func Add(ctx context.Context, cfg *appconfig.CompletedConfig, mgr manager.Manage
return ok
},
DeleteFunc: func(e event.DeleteEvent) bool {
return true
return false
},
UpdateFunc: func(e event.UpdateEvent) bool {
oldPool, ok := e.ObjectOld.(*appsv1beta2.NodePool)
@@ -147,18 +147,7 @@ func (r *ReconcileHubLeaderConfig) Reconcile(
return reconcile.Result{}, client.IgnoreNotFound(err)
}

if !nodepool.ObjectMeta.DeletionTimestamp.IsZero() {
// If the NodePool is being deleted, delete the leader configmap
configMapName := fmt.Sprintf("leader-hub-%s", nodepool.Name)
err := r.Client.Delete(ctx, &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: configMapName,
Namespace: r.Configuration.HubLeaderNamespace,
},
})
if err != nil && !errors.IsNotFound(err) {
return reconcile.Result{}, err
}
if nodepool.ObjectMeta.DeletionTimestamp != nil {
return reconcile.Result{}, nil
}

@@ -222,6 +211,14 @@ func (r *ReconcileHubLeaderConfig) reconcileHubLeaderConfig(
Labels: map[string]string{
projectinfo.GetHubLeaderConfigMapLabel(): configMapName,
},
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: nodepool.APIVersion,
Kind: nodepool.Kind,
Name: nodepool.Name,
UID: nodepool.UID,
},
},
},
Data: data,
}
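With garbage collection handling cleanup, the controller no longer needs to react to NodePool delete events, which is why DeleteFunc above now returns false. A hedged controller-runtime sketch of such a predicate (package and variable names are illustrative, not the project's actual code):

package hubleaderpredicates

import (
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

// nodePoolEvents filters watch events for the hub leader config controller.
// Delete events are dropped because the leader ConfigMap carries an owner
// reference to its NodePool and is garbage collected with it.
var nodePoolEvents = predicate.Funcs{
	CreateFunc: func(e event.CreateEvent) bool { return true },
	UpdateFunc: func(e event.UpdateEvent) bool { return true },
	DeleteFunc: func(e event.DeleteEvent) bool { return false },
}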
@@ -95,6 +95,11 @@ func TestReconcile(t *testing.T) {
Labels: map[string]string{
projectinfo.GetHubLeaderConfigMapLabel(): "leader-hub-hangzhou",
},
OwnerReferences: []metav1.OwnerReference{
{
Name: "hangzhou",
},
},
},
Data: map[string]string{
"leaders": "node1/10.0.0.1",
@@ -153,6 +158,11 @@ func TestReconcile(t *testing.T) {
Labels: map[string]string{
projectinfo.GetHubLeaderConfigMapLabel(): "leader-hub-shanghai",
},
OwnerReferences: []metav1.OwnerReference{
{
Name: "shanghai",
},
},
},
Data: map[string]string{
"leaders": "node1/10.0.0.1,node2/10.0.0.2",
@@ -210,6 +220,11 @@ func TestReconcile(t *testing.T) {
Labels: map[string]string{
projectinfo.GetHubLeaderConfigMapLabel(): "leader-hub-shanghai",
},
OwnerReferences: []metav1.OwnerReference{
{
Name: "shanghai",
},
},
},
Data: map[string]string{
"leaders": "node1/10.0.0.1",
@@ -225,6 +240,11 @@ func TestReconcile(t *testing.T) {
Labels: map[string]string{
projectinfo.GetHubLeaderConfigMapLabel(): "leader-hub-shanghai",
},
OwnerReferences: []metav1.OwnerReference{
{
Name: "shanghai",
},
},
},
Data: map[string]string{
"leaders": "node1/10.0.0.1,node2/10.0.0.2",
@@ -274,6 +294,11 @@ func TestReconcile(t *testing.T) {
Labels: map[string]string{
projectinfo.GetHubLeaderConfigMapLabel(): "leader-hub-beijing",
},
OwnerReferences: []metav1.OwnerReference{
{
Name: "beijing",
},
},
},
Data: map[string]string{
"leaders": "",
@@ -320,6 +345,11 @@ func TestReconcile(t *testing.T) {
Labels: map[string]string{
projectinfo.GetHubLeaderConfigMapLabel(): "leader-hub-beijing",
},
OwnerReferences: []metav1.OwnerReference{
{
Name: "beijing",
},
},
},
Data: map[string]string{
"leaders": "node1/10.0.0.1,node2/10.0.0.2",
@@ -366,6 +396,11 @@ func TestReconcile(t *testing.T) {
Labels: map[string]string{
projectinfo.GetHubLeaderConfigMapLabel(): "leader-hub-beijing",
},
OwnerReferences: []metav1.OwnerReference{
{
Name: "beijing",
},
},
},
Data: map[string]string{
"leaders": "node1/10.0.0.1,node2/10.0.0.2",
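The expected ConfigMaps in these test cases only set Name in their owner references. A compact, self-contained sketch of the kind of check such cases perform against a controller-runtime fake client (object names are illustrative):

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

func main() {
	// Seed the fake client with a leader ConfigMap whose owner reference
	// names its pool, mirroring the expected objects in TestReconcile.
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:            "leader-hub-hangzhou",
			Namespace:       "kube-system",
			OwnerReferences: []metav1.OwnerReference{{Name: "hangzhou"}},
		},
	}
	c := fake.NewClientBuilder().WithObjects(cm).Build()

	// Fetch it back and inspect the owner reference, as the assertions do.
	got := &corev1.ConfigMap{}
	if err := c.Get(context.TODO(), client.ObjectKey{Name: "leader-hub-hangzhou", Namespace: "kube-system"}, got); err != nil {
		panic(err)
	}
	fmt.Println(got.OwnerReferences[0].Name) // prints: hangzhou
}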
3 changes: 0 additions & 3 deletions pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_default.go
@@ -94,8 +94,5 @@ func (webhook *NodePoolHandler) Default(ctx context.Context, obj runtime.Object)
}
}

// Set default enable leader election to false
np.Spec.EnableLeaderElection = false

return nil
}
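The two lines removed above mean the defaulting webhook no longer overrides the user's choice for EnableLeaderElection. A minimal sketch of the resulting behaviour, using stand-in types rather than the project's real webhook code:

package main

import "fmt"

// nodePoolSpec stands in for the relevant part of the NodePool spec; only the
// fields needed for this sketch are included.
type nodePoolSpec struct {
	LeaderElectionStrategy string
	EnableLeaderElection   bool
}

// applyDefaults mirrors the behaviour after this change: empty fields still
// receive defaults, but EnableLeaderElection keeps whatever the user set
// instead of being forced back to false.
func applyDefaults(spec *nodePoolSpec) {
	if spec.LeaderElectionStrategy == "" {
		spec.LeaderElectionStrategy = "random" // illustrative default value
	}
	// EnableLeaderElection is deliberately left untouched here.
}

func main() {
	spec := nodePoolSpec{EnableLeaderElection: true}
	applyDefaults(&spec)
	fmt.Println(spec.EnableLeaderElection, spec.LeaderElectionStrategy) // true random
}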
62 changes: 62 additions & 0 deletions pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_default_test.go
@@ -612,6 +612,68 @@ func TestDefault(t *testing.T) {
},
},
},
"nodepool has enable leader election enabled": {
obj: &v1beta2.NodePool{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Labels: map[string]string{
"foo": "bar",
},
},
Spec: v1beta2.NodePoolSpec{
HostNetwork: true,
Type: v1beta2.Cloud,
LeaderElectionStrategy: string(v1beta2.ElectionStrategyMark),
LeaderReplicas: 3,
EnableLeaderElection: true,
PoolScopeMetadata: []metav1.GroupVersionResource{
{
Group: "discovery.k8s.io",
Version: "v1",
Resource: "Endpoints",
},
},
},
},
wantedNodePool: &v1beta2.NodePool{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Labels: map[string]string{
"foo": "bar",
"nodepool.openyurt.io/type": "cloud",
},
},
Spec: v1beta2.NodePoolSpec{
HostNetwork: true,
Type: v1beta2.Cloud,
LeaderElectionStrategy: string(v1beta2.ElectionStrategyMark),
LeaderReplicas: 3,
EnableLeaderElection: true,
PoolScopeMetadata: []metav1.GroupVersionResource{
{
Group: "discovery.k8s.io",
Version: "v1",
Resource: "Endpoints",
},
{
Group: "",
Version: "v1",
Resource: "services",
},
{
Group: "discovery.k8s.io",
Version: "v1",
Resource: "endpointslices",
},
},
},
Status: v1beta2.NodePoolStatus{
ReadyNodeNum: 0,
UnreadyNodeNum: 0,
Nodes: []string{},
},
},
},
}

for k, tc := range testcases {
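The new case above ("nodepool has enable leader election enabled") follows the same table-driven pattern as the rest of TestDefault. A generic, self-contained sketch of that pattern with stand-in types (the full loop body is not shown in this hunk, so helper names here are illustrative):

package sketch

import (
	"reflect"
	"testing"
)

type spec struct{ EnableLeaderElection bool }

// setDefaults stands in for the webhook's Default handler; it preserves
// EnableLeaderElection as-is.
func setDefaults(s *spec) {}

// TestDefaultsTableSketch shows the table-driven shape used by TestDefault:
// each named case pairs an input object with the expected defaulted object.
func TestDefaultsTableSketch(t *testing.T) {
	testcases := map[string]struct {
		in   spec
		want spec
	}{
		"leader election enabled stays enabled": {
			in:   spec{EnableLeaderElection: true},
			want: spec{EnableLeaderElection: true},
		},
	}

	for name, tc := range testcases {
		t.Run(name, func(t *testing.T) {
			setDefaults(&tc.in)
			if !reflect.DeepEqual(tc.in, tc.want) {
				t.Errorf("got %+v, want %+v", tc.in, tc.want)
			}
		})
	}
}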
127 changes: 110 additions & 17 deletions test/e2e/yurt/hubleader.go
@@ -19,13 +19,15 @@ package yurt
import (
"cmp"
"context"
"fmt"
"slices"
"strings"
"time"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/util/retry"
@@ -91,23 +93,6 @@ var _ = Describe("Test hubleader elections", Serial, func() {
return expectedLeaders
}

// getExpectedLeaderConfig returns the expected leader config map data
getExpectedLeaderConfig := func(leaders []v1beta2.Leader) map[string]string {
expectedLeaderConfig := make(map[string]string)

leaderEndpoints := make([]string, 0, len(leaders))
for _, leader := range leaders {
leaderEndpoints = append(leaderEndpoints, leader.NodeName+"/"+leader.Address)
}

expectedLeaderConfig["leaders"] = strings.Join(leaderEndpoints, ",")
expectedLeaderConfig["pool-scoped-metadata"] = "/v1/services,discovery.k8s.io/v1/endpointslices"
expectedLeaderConfig["interconnectivity"] = "true"
expectedLeaderConfig["enable-leader-election"] = "true"

return expectedLeaderConfig
}

getActualLeaders := func() []v1beta2.Leader {
pool, err := util.GetNodepool(ctx, k8sClient, nodePoolName)
if err != nil {
@@ -254,3 +239,111 @@
})
})
})

var _ = Describe("Hub leader config owner cleanup", func() {
ctx := context.Background()
var k8sClient client.Client

BeforeEach(func() {
k8sClient = ycfg.YurtE2eCfg.RuntimeClient
})

AfterEach(func() {})

It("Should delete hub leader config when nodepool is deleted", func() {
npName := fmt.Sprintf("test-%d", time.Now().Unix())
pool := util.TestNodePool{
NodePool: v1beta2.NodePool{
ObjectMeta: metav1.ObjectMeta{
Name: npName,
},
Spec: v1beta2.NodePoolSpec{
EnableLeaderElection: true,
InterConnectivity: true,
Type: v1beta2.Edge,
},
},
}

By("Creating a new empty nodepool")
Eventually(
func() error {
return util.InitTestNodePool(ctx, k8sClient, pool)
},
time.Second*5, time.Millisecond*500).Should(BeNil())

By("Nodepool should be created")
Eventually(
func() error {
_, err := util.GetNodepool(ctx, k8sClient, pool.NodePool.Name)
if err != nil {
return err
}
return nil
},
time.Second*5, time.Millisecond*500).Should(BeNil())

By("Leader config map should be created")
Eventually(
getActualLeaderConfig,
time.Second*5,
time.Millisecond*500,
).WithArguments(ctx, k8sClient, npName).Should(Equal(getExpectedLeaderConfig([]v1beta2.Leader{})))

By("Delete the nodepool")
Eventually(
func() error {
return util.DeleteNodePool(ctx, k8sClient, npName)
},
time.Second*5, time.Millisecond*500).Should(BeNil())

By("Leader config map should be not found")
Eventually(
func() error {
err := k8sClient.Get(
ctx,
client.ObjectKey{Name: "leader-hub-" + npName, Namespace: metav1.NamespaceSystem},
&v1.ConfigMap{},
)
if err != nil {
if errors.IsNotFound(err) {
return nil
}
return err
}
return fmt.Errorf("leader config map still exists")
},
time.Second*30, time.Millisecond*500).Should(BeNil())
})
})

// getActualLeaderConfig returns the actual leader config map data
func getActualLeaderConfig(ctx context.Context, k8sClient client.Client, nodePoolName string) map[string]string {
configMap := v1.ConfigMap{}
err := k8sClient.Get(
ctx,
client.ObjectKey{Name: "leader-hub-" + nodePoolName, Namespace: metav1.NamespaceSystem},
&configMap,
)
if err != nil {
return nil
}
return configMap.Data
}

// getExpectedLeaderConfig returns the expected leader config map data
func getExpectedLeaderConfig(leaders []v1beta2.Leader) map[string]string {
expectedLeaderConfig := make(map[string]string)

leaderEndpoints := make([]string, 0, len(leaders))
for _, leader := range leaders {
leaderEndpoints = append(leaderEndpoints, leader.NodeName+"/"+leader.Address)
}

expectedLeaderConfig["leaders"] = strings.Join(leaderEndpoints, ",")
expectedLeaderConfig["pool-scoped-metadata"] = "/v1/services,discovery.k8s.io/v1/endpointslices"
expectedLeaderConfig["interconnectivity"] = "true"
expectedLeaderConfig["enable-leader-election"] = "true"

return expectedLeaderConfig
}
