diff --git a/controllers/rediscluster_controller.go b/controllers/rediscluster_controller.go
index d9a123c2a..7b10eee15 100644
--- a/controllers/rediscluster_controller.go
+++ b/controllers/rediscluster_controller.go
@@ -101,7 +101,7 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request
 			// Step 3 Rebalance the cluster
 			k8sutils.RebalanceRedisCluster(r.K8sClient, r.Log, instance)
 			reqLogger.Info("Redis cluster is downscaled... Rebalancing the cluster is done")
-			err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterReady, status.ReadyClusterReason, leaderReplicas, leaderReplicas)
+			err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterReady, status.ReadyClusterReason, leaderReplicas, leaderReplicas, r.Dk8sClient)
 			if err != nil {
 				return ctrl.Result{}, err
 			}
@@ -110,7 +110,7 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request

 	// Mark the cluster status as initializing if there are no leader or follower nodes
 	if instance.Status.ReadyLeaderReplicas == 0 && instance.Status.ReadyFollowerReplicas == 0 {
-		err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterInitializing, status.InitializingClusterLeaderReason, 0, 0)
+		err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterInitializing, status.InitializingClusterLeaderReason, 0, 0, r.Dk8sClient)
 		if err != nil {
 			return ctrl.Result{}, err
 		}
@@ -144,7 +144,7 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request

 	// Mark the cluster status as initializing if there are no follower nodes
 	if instance.Status.ReadyLeaderReplicas == 0 && instance.Status.ReadyFollowerReplicas == 0 {
-		err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterInitializing, status.InitializingClusterFollowerReason, leaderReplicas, 0)
+		err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterInitializing, status.InitializingClusterFollowerReason, leaderReplicas, 0, r.Dk8sClient)
 		if err != nil {
 			return ctrl.Result{}, err
 		}
@@ -185,7 +185,7 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request

 	// Mark the cluster status as bootstrapping if all the leader and follower nodes are ready
 	if !(instance.Status.ReadyLeaderReplicas == leaderReplicas && instance.Status.ReadyFollowerReplicas == followerReplicas) {
-		err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterBootstrap, status.BootstrapClusterReason, leaderReplicas, followerReplicas)
+		err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterBootstrap, status.BootstrapClusterReason, leaderReplicas, followerReplicas, r.Dk8sClient)
 		if err != nil {
 			return ctrl.Result{}, err
 		}
@@ -236,7 +236,7 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request

 	// Mark the cluster status as ready if all the leader and follower nodes are ready
 	if instance.Status.ReadyLeaderReplicas == leaderReplicas && instance.Status.ReadyFollowerReplicas == followerReplicas {
-		err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterReady, status.ReadyClusterReason, leaderReplicas, followerReplicas)
+		err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterReady, status.ReadyClusterReason, leaderReplicas, followerReplicas, r.Dk8sClient)
 		if err != nil {
 			return ctrl.Result{}, err
 		}
diff --git a/controllers/redissentinel_controller.go b/controllers/redissentinel_controller.go
index 016135770..42b117672 100644
--- a/controllers/redissentinel_controller.go
+++ b/controllers/redissentinel_controller.go
@@ -55,7 +55,7 @@ func (r *RedisSentinelReconciler) Reconcile(ctx context.Context, req ctrl.Reques
 	}

 	// Create Redis Sentinel
-	err = k8sutils.CreateRedisSentinel(ctx, r.K8sClient, r.Log, instance, r.K8sClient)
+	err = k8sutils.CreateRedisSentinel(ctx, r.K8sClient, r.Log, instance, r.K8sClient, r.Dk8sClient)
 	if err != nil {
 		return ctrl.Result{}, err
 	}
diff --git a/k8sutils/redis-sentinel.go b/k8sutils/redis-sentinel.go
index 5ae9dbef1..698a19980 100644
--- a/k8sutils/redis-sentinel.go
+++ b/k8sutils/redis-sentinel.go
@@ -14,6 +14,7 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/kubernetes"
 )

@@ -37,7 +38,7 @@ type RedisReplicationObject struct {
 }

 // Redis Sentinel Create the Redis Sentinel Setup
-func CreateRedisSentinel(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel, cl kubernetes.Interface) error {
+func CreateRedisSentinel(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel, cl kubernetes.Interface, dcl dynamic.Interface) error {
 	prop := RedisSentinelSTS{
 		RedisStateFulType: "sentinel",
 		Affinity: cr.Spec.Affinity,
@@ -50,7 +51,7 @@ func CreateRedisSentinel(ctx context.Context, client kubernetes.Interface, logge
 		prop.ExternalConfig = cr.Spec.RedisSentinelConfig.AdditionalSentinelConfig
 	}

-	return prop.CreateRedisSentinelSetup(ctx, client, logger, cr, cl)
+	return prop.CreateRedisSentinelSetup(ctx, client, logger, cr, cl, dcl)
 }
@@ -64,7 +65,7 @@ func CreateRedisSentinelService(cr *redisv1beta2.RedisSentinel, cl kubernetes.In
 }

 // Create Redis Sentinel Cluster Setup
-func (service RedisSentinelSTS) CreateRedisSentinelSetup(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel, cl kubernetes.Interface) error {
+func (service RedisSentinelSTS) CreateRedisSentinelSetup(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel, cl kubernetes.Interface, dcl dynamic.Interface) error {
 	stateFulName := cr.ObjectMeta.Name + "-" + service.RedisStateFulType
 	labels := getRedisLabels(stateFulName, sentinel, service.RedisStateFulType, cr.ObjectMeta.Labels)
 	annotations := generateStatefulSetsAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.IgnoreAnnotations)
@@ -75,7 +76,7 @@ func (service RedisSentinelSTS) CreateRedisSentinelSetup(ctx context.Context, cl
 		generateRedisSentinelParams(cr, service.getSentinelCount(cr), service.ExternalConfig, service.Affinity),
 		redisSentinelAsOwner(cr),
 		generateRedisSentinelInitContainerParams(cr),
-		generateRedisSentinelContainerParams(ctx, client, logger, cr, service.ReadinessProbe, service.LivenessProbe),
+		generateRedisSentinelContainerParams(ctx, client, logger, cr, service.ReadinessProbe, service.LivenessProbe, dcl),
 		cr.Spec.Sidecars,
 		cl,
 	)
@@ -145,7 +146,7 @@ func generateRedisSentinelInitContainerParams(cr *redisv1beta2.RedisSentinel) in
 }

 // Create Redis Sentinel Statefulset Container Params
-func generateRedisSentinelContainerParams(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel, readinessProbeDef *commonapi.Probe, livenessProbeDef *commonapi.Probe) containerParameters {
+func generateRedisSentinelContainerParams(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel, readinessProbeDef *commonapi.Probe, livenessProbeDef *commonapi.Probe, dcl dynamic.Interface) containerParameters {
 	trueProperty := true
 	falseProperty := false
 	containerProp := containerParameters{
@@ -154,7 +155,7 @@ func generateRedisSentinelContainerParams(ctx context.Context, client kubernetes
 		ImagePullPolicy: cr.Spec.KubernetesConfig.ImagePullPolicy,
 		Resources: cr.Spec.KubernetesConfig.Resources,
 		SecurityContext: cr.Spec.SecurityContext,
-		AdditionalEnvVariable: getSentinelEnvVariable(ctx, client, logger, cr),
+		AdditionalEnvVariable: getSentinelEnvVariable(ctx, client, logger, cr, dcl),
 	}
 	if cr.Spec.EnvVars != nil {
 		containerProp.EnvVars = cr.Spec.EnvVars
@@ -245,7 +246,7 @@ func (service RedisSentinelService) CreateRedisSentinelService(cr *redisv1beta2.
 }

-func getSentinelEnvVariable(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel) *[]corev1.EnvVar {
+func getSentinelEnvVariable(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel, dcl dynamic.Interface) *[]corev1.EnvVar {
 	if cr.Spec.RedisSentinelConfig == nil {
 		return &[]corev1.EnvVar{}
 	}
@@ -257,7 +258,7 @@ func getSentinelEnvVariable(ctx context.Context, client kubernetes.Interface, lo
 		},
 		{
 			Name: "IP",
-			Value: getRedisReplicationMasterIP(ctx, client, logger, cr),
+			Value: getRedisReplicationMasterIP(ctx, client, logger, cr, dcl),
 		},
 		{
 			Name: "PORT",
@@ -291,13 +292,7 @@ func getSentinelEnvVariable(ctx context.Context, client kubernetes.Interface, lo
 	}
 }

-func getRedisReplicationMasterIP(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel) string {
-	dClient, err := GenerateK8sDynamicClient(GenerateK8sConfig)
-	if err != nil {
-		logger.Error(err, "Failed to generate dynamic client")
-		return ""
-	}
-
+func getRedisReplicationMasterIP(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel, dcl dynamic.Interface) string {
 	replicationName := cr.Spec.RedisSentinelConfig.RedisReplicationName
 	replicationNamespace := cr.Namespace

@@ -305,7 +300,7 @@ func getRedisReplicationMasterIP(ctx context.Context, client kubernetes.Interfac
 	var realMasterPod string

 	// Get Request on Dynamic Client
-	customObject, err := dClient.Resource(schema.GroupVersionResource{
+	customObject, err := dcl.Resource(schema.GroupVersionResource{
 		Group: "redis.redis.opstreelabs.in",
 		Version: "v1beta2",
 		Resource: "redisreplications",
diff --git a/k8sutils/redis-sentinel_test.go b/k8sutils/redis-sentinel_test.go
index f30d7a267..85260d5b1 100644
--- a/k8sutils/redis-sentinel_test.go
+++ b/k8sutils/redis-sentinel_test.go
@@ -2,18 +2,21 @@ package k8sutils

 import (
 	"context"
-	"k8s.io/client-go/kubernetes"
 	"os"
 	"path/filepath"
 	"reflect"
 	"testing"

+	"k8s.io/client-go/dynamic/fake"
+	"k8s.io/client-go/kubernetes"
+
 	common "github.com/OT-CONTAINER-KIT/redis-operator/api"
 	redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2"
 	"github.com/go-logr/logr"
 	"github.com/stretchr/testify/assert"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/yaml"
 	"k8s.io/utils/pointer"
 )
@@ -185,7 +188,7 @@ func Test_generateRedisSentinelContainerParams(t *testing.T) {
 				t.Fatalf("Failed to unmarshal file %s: %v", path, err)
 			}

-			actual := generateRedisSentinelContainerParams(context.TODO(), nil, logr.Logger{}, input, nil, nil)
+			actual := generateRedisSentinelContainerParams(context.TODO(), nil, logr.Logger{}, input, nil, nil, nil)
 			assert.EqualValues(t, expected, actual, "Expected %+v, got %+v", expected, actual)
 		}
@@ -326,7 +329,7 @@ func Test_getSentinelEnvVariable(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			if got := getSentinelEnvVariable(tt.args.ctx, tt.args.client, tt.args.logger, tt.args.cr); !reflect.DeepEqual(got, tt.want) {
+			if got := getSentinelEnvVariable(tt.args.ctx, tt.args.client, tt.args.logger, tt.args.cr, fake.NewSimpleDynamicClient(&runtime.Scheme{})); !reflect.DeepEqual(got, tt.want) {
 				t.Errorf("getSentinelEnvVariable() = %v, want %v", got, tt.want)
 			}
 		})
diff --git a/k8sutils/status.go b/k8sutils/status.go
index d652ad7c7..2beb2da91 100644
--- a/k8sutils/status.go
+++ b/k8sutils/status.go
@@ -10,6 +10,7 @@ import (
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/dynamic"
 )

 // statusLogger will generate logging interface for status
@@ -19,18 +20,13 @@ func statusLogger(namespace string, name string) logr.Logger {
 }

 // UpdateRedisClusterStatus will update the status of the RedisCluster
-func UpdateRedisClusterStatus(cr *redisv1beta2.RedisCluster, status status.RedisClusterState, resaon string, readyLeaderReplicas, readyFollowerReplicas int32) error {
+func UpdateRedisClusterStatus(cr *redisv1beta2.RedisCluster, status status.RedisClusterState, resaon string, readyLeaderReplicas, readyFollowerReplicas int32, dcl dynamic.Interface) error {
 	logger := statusLogger(cr.Namespace, cr.Name)
 	cr.Status.State = status
 	cr.Status.Reason = resaon
 	cr.Status.ReadyLeaderReplicas = readyLeaderReplicas
 	cr.Status.ReadyFollowerReplicas = readyFollowerReplicas

-	client, err := GenerateK8sDynamicClient(GenerateK8sConfig)
-	if err != nil {
-		logger.Error(err, "Failed to generate k8s dynamic client")
-		return err
-	}
 	gvr := schema.GroupVersionResource{
 		Group: "redis.redis.opstreelabs.in",
 		Version: "v1beta2",
@@ -43,7 +39,7 @@ func UpdateRedisClusterStatus(cr *redisv1beta2.RedisCluster, status status.Redis
 	}
 	unstructuredRedisCluster := &unstructured.Unstructured{Object: unstructuredObj}

-	_, err = client.Resource(gvr).Namespace(cr.Namespace).UpdateStatus(context.TODO(), unstructuredRedisCluster, metav1.UpdateOptions{})
+	_, err = dcl.Resource(gvr).Namespace(cr.Namespace).UpdateStatus(context.TODO(), unstructuredRedisCluster, metav1.UpdateOptions{})
 	if err != nil {
 		logger.Error(err, "Failed to update status")
 		return err