Skip to content

Commit

Permalink
feat: rolling update sequence from leader to follower (#966)
Browse files Browse the repository at this point in the history
* feat: rolling update sequence from leader to follower

Signed-off-by: drivebyer <[email protected]>

* fix

Signed-off-by: drivebyer <[email protected]>

* add suffix

Signed-off-by: drivebyer <[email protected]>

* fix test

Signed-off-by: drivebyer <[email protected]>

* fix lint

Signed-off-by: drivebyer <[email protected]>

---------

Signed-off-by: drivebyer <[email protected]>
  • Loading branch information
drivebyer authored Jun 5, 2024
1 parent 96a5ccf commit 92ac24f
Show file tree
Hide file tree
Showing 9 changed files with 257 additions and 16 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/publish-image.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,6 @@ jobs:
file: Dockerfile
push: true
tags: |
${{ env.REGISTRY }}/${{ env.REPOSITORY }}:${{ env.TAG }}
${{ env.REGISTRY }}/${{ env.REPOSITORY }}:latest
${{ env.REGISTRY }}/${{ env.REPOSITORY }}/redis-operator:${{ env.TAG }}
${{ env.REGISTRY }}/${{ env.REPOSITORY }}/redis-operator:latest
platforms: linux/amd64,linux/arm64
11 changes: 7 additions & 4 deletions controllers/rediscluster_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ import (
// RedisClusterReconciler reconciles a RedisCluster object
type RedisClusterReconciler struct {
client.Client
k8sutils.StatefulSet
K8sClient kubernetes.Interface
Dk8sClient dynamic.Interface
Log logr.Logger
Expand Down Expand Up @@ -125,6 +126,7 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request
return ctrl.Result{}, err
}

// todo: remove me after watch statefulset in controller
redisLeaderInfo, err := k8sutils.GetStatefulSet(r.K8sClient, r.Log, instance.GetNamespace(), instance.GetName()+"-leader")
if err != nil {
if errors.IsNotFound(err) {
Expand All @@ -133,7 +135,7 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request
return ctrl.Result{}, err
}

if redisLeaderInfo.Status.ReadyReplicas == leaderReplicas {
if r.IsStatefulSetReady(ctx, instance.Namespace, instance.Name+"-leader") {
// Mark the cluster status as initializing if there are no follower nodes
if (instance.Status.ReadyLeaderReplicas == 0 && instance.Status.ReadyFollowerReplicas == 0) ||
instance.Status.ReadyFollowerReplicas != followerReplicas {
Expand All @@ -158,6 +160,7 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request
return ctrl.Result{}, err
}
}
// todo: remove me after watch statefulset in controller
redisFollowerInfo, err := k8sutils.GetStatefulSet(r.K8sClient, r.Log, instance.GetNamespace(), instance.GetName()+"-follower")
if err != nil {
if errors.IsNotFound(err) {
Expand All @@ -171,9 +174,9 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request
return ctrl.Result{RequeueAfter: time.Second * 60}, nil
}

if !(redisLeaderInfo.Status.ReadyReplicas == leaderReplicas && redisFollowerInfo.Status.ReadyReplicas == followerReplicas) {
reqLogger.Info("Redis leader and follower nodes are not ready yet", "Ready.Replicas", strconv.Itoa(int(redisLeaderInfo.Status.ReadyReplicas)), "Expected.Replicas", leaderReplicas)
return ctrl.Result{RequeueAfter: time.Second * 60}, nil
if !(r.IsStatefulSetReady(ctx, instance.Namespace, instance.Name+"-leader") && r.IsStatefulSetReady(ctx, instance.Namespace, instance.Name+"-follower")) {
reqLogger.Info("Redis leader and follower nodes are not ready yet")
return ctrl.Result{RequeueAfter: time.Second * 30}, nil
}

// Mark the cluster status as bootstrapping if all the leader and follower nodes are ready
Expand Down
12 changes: 7 additions & 5 deletions controllers/suite_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,8 @@ import (
"testing"
"time"

// redisv1beta1 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta1"
redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2"
"github.com/OT-CONTAINER-KIT/redis-operator/k8sutils"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
Expand Down Expand Up @@ -101,11 +101,13 @@ var _ = BeforeSuite(func() {
}).SetupWithManager(k8sManager)
Expect(err).ToNot(HaveOccurred())

rrLog := ctrl.Log.WithName("controllers").WithName("RedisReplication")
err = (&RedisClusterReconciler{
Client: k8sManager.GetClient(),
K8sClient: k8sClient,
Dk8sClient: dk8sClient,
Scheme: k8sManager.GetScheme(),
Client: k8sManager.GetClient(),
K8sClient: k8sClient,
Dk8sClient: dk8sClient,
Scheme: k8sManager.GetScheme(),
StatefulSet: k8sutils.NewStatefulSetService(k8sClient, rrLog),
}).SetupWithManager(k8sManager)
Expect(err).ToNot(HaveOccurred())

Expand Down
54 changes: 54 additions & 0 deletions k8sutils/statefulset.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,60 @@ import (
"k8s.io/utils/ptr"
)

// StatefulSet abstracts StatefulSet readiness queries so that consumers
// (e.g. reconcilers that embed it) can be supplied with a fake in tests.
type StatefulSet interface {
	// IsStatefulSetReady reports whether the named StatefulSet has fully
	// rolled out its current spec (see StatefulSetService for the criteria).
	IsStatefulSetReady(ctx context.Context, namespace, name string) bool
}

// StatefulSetService implements the StatefulSet interface on top of a real
// Kubernetes client.
type StatefulSetService struct {
	kubeClient kubernetes.Interface
	log        logr.Logger
}

// NewStatefulSetService builds a StatefulSetService around the given client.
// The supplied logger is tagged with a fixed "service" key so that all
// messages from this service are identifiable.
func NewStatefulSetService(kubeClient kubernetes.Interface, log logr.Logger) *StatefulSetService {
	return &StatefulSetService{
		kubeClient: kubeClient,
		log:        log.WithValues("service", "k8s.statefulset"),
	}
}

// IsStatefulSetReady reports whether the named StatefulSet has completed its
// current rollout. It returns true only when (a) every pod the rolling-update
// partition allows to be updated is on the new revision, (b) for a full
// (unpartitioned) rollout the current and update revisions have converged,
// and (c) the controller has observed the latest spec generation. Any error
// fetching the StatefulSet is logged and treated as "not ready".
func (s *StatefulSetService) IsStatefulSetReady(ctx context.Context, namespace, name string) bool {
	var (
		// Defaults mirror Kubernetes behavior when the spec fields are nil:
		// partition 0 (update all pods) and a single replica.
		partition = 0
		replicas  = 1

		logger = s.log.WithValues("namespace", namespace, "name", name)
	)

	sts, err := s.kubeClient.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		logger.Error(err, "failed to get statefulset")
		return false
	}

	if sts.Spec.UpdateStrategy.RollingUpdate != nil && sts.Spec.UpdateStrategy.RollingUpdate.Partition != nil {
		partition = int(*sts.Spec.UpdateStrategy.RollingUpdate.Partition)
	}
	if sts.Spec.Replicas != nil {
		replicas = int(*sts.Spec.Replicas)
	}

	// With partition P only pods with ordinal >= P are rolled, so a completed
	// (possibly partial) rollout has exactly replicas-P updated pods.
	if expectedUpdateReplicas := replicas - partition; sts.Status.UpdatedReplicas < int32(expectedUpdateReplicas) {
		logger.V(1).Info("StatefulSet is not ready", "Status.UpdatedReplicas", sts.Status.UpdatedReplicas, "ExpectedUpdateReplicas", expectedUpdateReplicas)
		return false
	}
	// Revisions only converge on a full rollout; a non-zero partition
	// intentionally leaves some pods on the old revision, so skip this check.
	if partition == 0 && sts.Status.CurrentRevision != sts.Status.UpdateRevision {
		logger.V(1).Info("StatefulSet is not ready", "Status.CurrentRevision", sts.Status.CurrentRevision, "Status.UpdateRevision", sts.Status.UpdateRevision)
		return false
	}
	// Guard against judging readiness from a status the controller wrote for
	// an older version of the spec.
	if sts.Status.ObservedGeneration != sts.ObjectMeta.Generation {
		logger.V(1).Info("StatefulSet is not ready", "Status.ObservedGeneration", sts.Status.ObservedGeneration, "ObjectMeta.Generation", sts.ObjectMeta.Generation)
		return false
	}

	return true
}

const (
redisExporterContainer = "redis-exporter"
)
Expand Down
12 changes: 7 additions & 5 deletions main.go
Original file line number Diff line number Diff line change
Expand Up @@ -124,12 +124,14 @@ func main() {
setupLog.Error(err, "unable to create controller", "controller", "Redis")
os.Exit(1)
}
rcLog := ctrl.Log.WithName("controllers").WithName("RedisCluster")
if err = (&controllers.RedisClusterReconciler{
Client: mgr.GetClient(),
K8sClient: k8sclient,
Dk8sClient: dk8sClient,
Log: ctrl.Log.WithName("controllers").WithName("RedisCluster"),
Scheme: mgr.GetScheme(),
Client: mgr.GetClient(),
K8sClient: k8sclient,
Dk8sClient: dk8sClient,
Log: rcLog,
Scheme: mgr.GetScheme(),
StatefulSet: k8sutils.NewStatefulSetService(k8sclient, rcLog),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "RedisCluster")
os.Exit(1)
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,98 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
# Chainsaw e2e test: create a Redis cluster, write one key per pod, trigger a
# rolling update by applying a changed spec, and verify all keys survive.
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
  name: redis-cluster
spec:
  steps:
    # Create the cluster and wait until it reports the Ready state.
    - try:
        - apply:
            file: cluster.yaml
        - assert:
            file: ready-cluster.yaml

    # Seed one key on each leader and follower pod so data survival across
    # the rolling update can be checked afterwards.
    - name: Try saving a key
      try:
        - script:
            timeout: 30s
            content: >
              kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-0 -- redis-cli -c -p 6379 set foo-0 bar-0
            check:
              ($stdout=='OK'): true
        - script:
            timeout: 30s
            content: >
              kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-1 -- redis-cli -c -p 6379 set foo-1 bar-1
            check:
              ($stdout=='OK'): true
        - script:
            timeout: 30s
            content: >
              kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-2 -- redis-cli -c -p 6379 set foo-2 bar-2
            check:
              ($stdout=='OK'): true
        - script:
            timeout: 30s
            content: >
              kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-0 -- redis-cli -c -p 6379 set foo-3 bar-3
            check:
              ($stdout=='OK'): true
        - script:
            timeout: 30s
            content: >
              kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-1 -- redis-cli -c -p 6379 set foo-4 bar-4
            check:
              ($stdout=='OK'): true
        - script:
            timeout: 30s
            content: >
              kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-2 -- redis-cli -c -p 6379 set foo-5 bar-5
            check:
              ($stdout=='OK'): true

    # Apply a modified spec to trigger a rolling update, then wait for the
    # cluster to report Ready again.
    - name: Rolling update the cluster
      try:
        - apply:
            file: cluster-hscale.yaml
        - assert:
            file: ready-cluster.yaml

    # Read back every key written before the update to confirm no data loss.
    - name: Check if all keys exist
      try:
        - script:
            timeout: 30s
            content: >
              kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-0 -- redis-cli -c -p 6379 get foo-0
            check:
              ($stdout=='bar-0'): true
        - script:
            timeout: 30s
            content: >
              kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-1 -- redis-cli -c -p 6379 get foo-1
            check:
              ($stdout=='bar-1'): true
        - script:
            timeout: 30s
            content: >
              kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-leader redis-cluster-v1beta2-leader-2 -- redis-cli -c -p 6379 get foo-2
            check:
              ($stdout=='bar-2'): true
        - script:
            timeout: 30s
            content: >
              kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-0 -- redis-cli -c -p 6379 get foo-3
            check:
              ($stdout=='bar-3'): true
        - script:
            timeout: 30s
            content: >
              kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-1 -- redis-cli -c -p 6379 get foo-4
            check:
              ($stdout=='bar-4'): true
        - script:
            timeout: 30s
            content: >
              kubectl exec --namespace ${NAMESPACE} --container redis-cluster-v1beta2-follower redis-cluster-v1beta2-follower-2 -- redis-cli -c -p 6379 get foo-5
            check:
              ($stdout=='bar-5'): true
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
---
# RedisCluster manifest with a raised memory limit (256Mi vs the base 128Mi).
# Applying it over the base manifest changes the pod template and therefore
# triggers a rolling update of the cluster's StatefulSets.
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisCluster
metadata:
  name: redis-cluster-v1beta2
spec:
  clusterSize: 3
  clusterVersion: v7
  persistenceEnabled: false
  podSecurityContext:
    runAsUser: 1000
    fsGroup: 1000
  kubernetesConfig:
    image: quay.io/opstree/redis:v7.0.12
    imagePullPolicy: Always
    resources:
      requests:
        cpu: 101m
        memory: 128Mi
      limits:
        cpu: 101m
        memory: 256Mi # Increased memory limit
  storage:
    volumeClaimTemplate:
      spec:
        accessModes: [ReadWriteOnce]
        resources:
          requests:
            storage: 1Gi
    nodeConfVolume: true
    nodeConfVolumeClaimTemplate:
      spec:
        accessModes: [ReadWriteOnce]
        resources:
          requests:
            storage: 1Gi
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
---
# Base RedisCluster manifest used by the chainsaw test: 3 leaders + 3
# followers, no persistence, 128Mi memory limit (raised later to trigger a
# rolling update).
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisCluster
metadata:
  name: redis-cluster-v1beta2
spec:
  clusterSize: 3
  clusterVersion: v7
  persistenceEnabled: false
  podSecurityContext:
    runAsUser: 1000
    fsGroup: 1000
  kubernetesConfig:
    image: quay.io/opstree/redis:v7.0.12
    imagePullPolicy: Always
    resources:
      requests:
        cpu: 101m
        memory: 128Mi
      limits:
        cpu: 101m
        memory: 128Mi
  storage:
    volumeClaimTemplate:
      spec:
        accessModes: [ReadWriteOnce]
        resources:
          requests:
            storage: 1Gi
    nodeConfVolume: true
    nodeConfVolumeClaimTemplate:
      spec:
        accessModes: [ReadWriteOnce]
        resources:
          requests:
            storage: 1Gi
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
---
# Expected state asserted by chainsaw after each apply: all 3 leader and all
# 3 follower replicas ready and the cluster status marked Ready.
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisCluster
metadata:
  name: redis-cluster-v1beta2
status:
  readyFollowerReplicas: 3
  readyLeaderReplicas: 3
  state: Ready
  reason: RedisCluster is ready

0 comments on commit 92ac24f

Please sign in to comment.