Generate dynamic k8s client only once
Signed-off-by: Mathieu Cesbron <[email protected]>
MathieuCesbron committed Jan 17, 2024
1 parent 169194a commit 1875c92
Showing 5 changed files with 26 additions and 32 deletions.
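All five files apply the same refactor: instead of each helper building its own dynamic client through GenerateK8sDynamicClient on every call, the client is now created once and passed down as a dynamic.Interface parameter (dcl in k8sutils, r.Dk8sClient on the reconcilers). The creation side is not part of this diff; the following is a rough sketch of the startup wiring it implies, reusing the helpers visible in the removed code below. The setupLog name and the surrounding main.go scaffolding are illustrative assumptions, not code from this commit.

	// Sketch only: build the dynamic client once at operator startup and
	// inject it into each reconciler, instead of regenerating it per call.
	dk8sClient, err := k8sutils.GenerateK8sDynamicClient(k8sutils.GenerateK8sConfig)
	if err != nil {
		setupLog.Error(err, "unable to create k8s dynamic client")
		os.Exit(1)
	}
	reconciler := &controllers.RedisClusterReconciler{
		K8sClient:  k8sClient,  // typed clientset, assumed to be built once the same way
		Dk8sClient: dk8sClient, // dynamic client, now created exactly once
		Log:        ctrl.Log.WithName("controllers").WithName("RedisCluster"),
	}

One practical payoff: a client-construction failure now surfaces once at startup rather than being logged inside every reconcile loop.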

controllers/rediscluster_controller.go (10 changes: 5 additions & 5 deletions)
@@ -101,7 +101,7 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request
// Step 3 Rebalance the cluster
k8sutils.RebalanceRedisCluster(r.K8sClient, r.Log, instance)
reqLogger.Info("Redis cluster is downscaled... Rebalancing the cluster is done")
-err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterReady, status.ReadyClusterReason, leaderReplicas, leaderReplicas)
+err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterReady, status.ReadyClusterReason, leaderReplicas, leaderReplicas, r.Dk8sClient)
if err != nil {
return ctrl.Result{}, err
}
@@ -110,7 +110,7 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request

// Mark the cluster status as initializing if there are no leader or follower nodes
if instance.Status.ReadyLeaderReplicas == 0 && instance.Status.ReadyFollowerReplicas == 0 {
-err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterInitializing, status.InitializingClusterLeaderReason, 0, 0)
+err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterInitializing, status.InitializingClusterLeaderReason, 0, 0, r.Dk8sClient)
if err != nil {
return ctrl.Result{}, err
}
@@ -144,7 +144,7 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request

// Mark the cluster status as initializing if there are no follower nodes
if instance.Status.ReadyLeaderReplicas == 0 && instance.Status.ReadyFollowerReplicas == 0 {
-err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterInitializing, status.InitializingClusterFollowerReason, leaderReplicas, 0)
+err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterInitializing, status.InitializingClusterFollowerReason, leaderReplicas, 0, r.Dk8sClient)
if err != nil {
return ctrl.Result{}, err
}
@@ -185,7 +185,7 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request

// Mark the cluster status as bootstrapping if all the leader and follower nodes are ready
if !(instance.Status.ReadyLeaderReplicas == leaderReplicas && instance.Status.ReadyFollowerReplicas == followerReplicas) {
-err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterBootstrap, status.BootstrapClusterReason, leaderReplicas, followerReplicas)
+err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterBootstrap, status.BootstrapClusterReason, leaderReplicas, followerReplicas, r.Dk8sClient)
if err != nil {
return ctrl.Result{}, err
}
@@ -236,7 +236,7 @@ func (r *RedisClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request

// Mark the cluster status as ready if all the leader and follower nodes are ready
if instance.Status.ReadyLeaderReplicas == leaderReplicas && instance.Status.ReadyFollowerReplicas == followerReplicas {
-err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterReady, status.ReadyClusterReason, leaderReplicas, followerReplicas)
+err = k8sutils.UpdateRedisClusterStatus(instance, status.RedisClusterReady, status.ReadyClusterReason, leaderReplicas, followerReplicas, r.Dk8sClient)
if err != nil {
return ctrl.Result{}, err
}

controllers/redissentinel_controller.go (2 changes: 1 addition & 1 deletion)
@@ -55,7 +55,7 @@ func (r *RedisSentinelReconciler) Reconcile(ctx context.Context, req ctrl.Reques
}

// Create Redis Sentinel
-err = k8sutils.CreateRedisSentinel(ctx, r.K8sClient, r.Log, instance, r.K8sClient)
+err = k8sutils.CreateRedisSentinel(ctx, r.K8sClient, r.Log, instance, r.K8sClient, r.Dk8sClient)
if err != nil {
return ctrl.Result{}, err
}

k8sutils/redis-sentinel.go (27 changes: 11 additions & 16 deletions)
@@ -14,6 +14,7 @@ import (
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
)

@@ -37,7 +38,7 @@ type RedisReplicationObject struct {
}

// Redis Sentinel Create the Redis Sentinel Setup
-func CreateRedisSentinel(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel, cl kubernetes.Interface) error {
+func CreateRedisSentinel(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel, cl kubernetes.Interface, dcl dynamic.Interface) error {
prop := RedisSentinelSTS{
RedisStateFulType: "sentinel",
Affinity: cr.Spec.Affinity,
@@ -50,7 +51,7 @@ func CreateRedisSentinel(ctx context.Context, client kubernetes.Interface, logge
prop.ExternalConfig = cr.Spec.RedisSentinelConfig.AdditionalSentinelConfig
}

-return prop.CreateRedisSentinelSetup(ctx, client, logger, cr, cl)
+return prop.CreateRedisSentinelSetup(ctx, client, logger, cr, cl, dcl)

}

@@ -64,7 +65,7 @@ func CreateRedisSentinelService(cr *redisv1beta2.RedisSentinel, cl kubernetes.In
}

// Create Redis Sentinel Cluster Setup
-func (service RedisSentinelSTS) CreateRedisSentinelSetup(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel, cl kubernetes.Interface) error {
+func (service RedisSentinelSTS) CreateRedisSentinelSetup(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel, cl kubernetes.Interface, dcl dynamic.Interface) error {
stateFulName := cr.ObjectMeta.Name + "-" + service.RedisStateFulType
labels := getRedisLabels(stateFulName, sentinel, service.RedisStateFulType, cr.ObjectMeta.Labels)
annotations := generateStatefulSetsAnots(cr.ObjectMeta, cr.Spec.KubernetesConfig.IgnoreAnnotations)
@@ -75,7 +76,7 @@ func (service RedisSentinelSTS) CreateRedisSentinelSetup(ctx context.Context, cl
generateRedisSentinelParams(cr, service.getSentinelCount(cr), service.ExternalConfig, service.Affinity),
redisSentinelAsOwner(cr),
generateRedisSentinelInitContainerParams(cr),
-generateRedisSentinelContainerParams(ctx, client, logger, cr, service.ReadinessProbe, service.LivenessProbe),
+generateRedisSentinelContainerParams(ctx, client, logger, cr, service.ReadinessProbe, service.LivenessProbe, dcl),
cr.Spec.Sidecars,
cl,
)
@@ -145,7 +146,7 @@ func generateRedisSentinelInitContainerParams(cr *redisv1beta2.RedisSentinel) in
}

// Create Redis Sentinel Statefulset Container Params
-func generateRedisSentinelContainerParams(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel, readinessProbeDef *commonapi.Probe, livenessProbeDef *commonapi.Probe) containerParameters {
+func generateRedisSentinelContainerParams(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel, readinessProbeDef *commonapi.Probe, livenessProbeDef *commonapi.Probe, dcl dynamic.Interface) containerParameters {
trueProperty := true
falseProperty := false
containerProp := containerParameters{
@@ -154,7 +155,7 @@ func generateRedisSentinelContainerParams(ctx context.Context, client kubernetes
ImagePullPolicy: cr.Spec.KubernetesConfig.ImagePullPolicy,
Resources: cr.Spec.KubernetesConfig.Resources,
SecurityContext: cr.Spec.SecurityContext,
-AdditionalEnvVariable: getSentinelEnvVariable(ctx, client, logger, cr),
+AdditionalEnvVariable: getSentinelEnvVariable(ctx, client, logger, cr, dcl),
}
if cr.Spec.EnvVars != nil {
containerProp.EnvVars = cr.Spec.EnvVars
@@ -245,7 +246,7 @@ func (service RedisSentinelService) CreateRedisSentinelService(cr *redisv1beta2.

}

-func getSentinelEnvVariable(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel) *[]corev1.EnvVar {
+func getSentinelEnvVariable(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel, dcl dynamic.Interface) *[]corev1.EnvVar {
if cr.Spec.RedisSentinelConfig == nil {
return &[]corev1.EnvVar{}
}
@@ -257,7 +258,7 @@ func getSentinelEnvVariable(ctx context.Context, client kubernetes.Interface, lo
},
{
Name: "IP",
-Value: getRedisReplicationMasterIP(ctx, client, logger, cr),
+Value: getRedisReplicationMasterIP(ctx, client, logger, cr, dcl),
},
{
Name: "PORT",
@@ -291,21 +292,15 @@ func getSentinelEnvVariable(ctx context.Context, client kubernetes.Interface, lo

}

-func getRedisReplicationMasterIP(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel) string {
-dClient, err := GenerateK8sDynamicClient(GenerateK8sConfig)
-if err != nil {
-logger.Error(err, "Failed to generate dynamic client")
-return ""
-}
-
+func getRedisReplicationMasterIP(ctx context.Context, client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisSentinel, dcl dynamic.Interface) string {
replicationName := cr.Spec.RedisSentinelConfig.RedisReplicationName
replicationNamespace := cr.Namespace

var replicationInstance redisv1beta2.RedisReplication
var realMasterPod string

// Get Request on Dynamic Client
-customObject, err := dClient.Resource(schema.GroupVersionResource{
+customObject, err := dcl.Resource(schema.GroupVersionResource{
Group: "redis.redis.opstreelabs.in",
Version: "v1beta2",
Resource: "redisreplications",

k8sutils/redis-sentinel_test.go (9 changes: 6 additions & 3 deletions)
@@ -2,18 +2,21 @@ package k8sutils

import (
"context"
"k8s.io/client-go/kubernetes"
"os"
"path/filepath"
"reflect"
"testing"

"k8s.io/client-go/dynamic/fake"
"k8s.io/client-go/kubernetes"

common "github.com/OT-CONTAINER-KIT/redis-operator/api"
redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2"
"github.com/go-logr/logr"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/utils/pointer"
)
@@ -185,7 +188,7 @@ func Test_generateRedisSentinelContainerParams(t *testing.T) {
t.Fatalf("Failed to unmarshal file %s: %v", path, err)
}

-actual := generateRedisSentinelContainerParams(context.TODO(), nil, logr.Logger{}, input, nil, nil)
+actual := generateRedisSentinelContainerParams(context.TODO(), nil, logr.Logger{}, input, nil, nil, nil)
assert.EqualValues(t, expected, actual, "Expected %+v, got %+v", expected, actual)
}

@@ -326,7 +329,7 @@ func Test_getSentinelEnvVariable(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
-if got := getSentinelEnvVariable(tt.args.ctx, tt.args.client, tt.args.logger, tt.args.cr); !reflect.DeepEqual(got, tt.want) {
+if got := getSentinelEnvVariable(tt.args.ctx, tt.args.client, tt.args.logger, tt.args.cr, fake.NewSimpleDynamicClient(&runtime.Scheme{})); !reflect.DeepEqual(got, tt.want) {
t.Errorf("getSentinelEnvVariable() = %v, want %v", got, tt.want)
}
})
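Passing the client in is what lets these tests run without a kubeconfig: the empty fake above necessarily drives the lookup-failure path of getRedisReplicationMasterIP, since the fake holds no RedisReplication to Get. A test that wants the lookup to succeed could preload the fake instead. A hypothetical helper sketch follows; the object fields are illustrative, not part of this commit.

	import (
		"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
		"k8s.io/apimachinery/pkg/runtime"
		"k8s.io/client-go/dynamic"
		"k8s.io/client-go/dynamic/fake"
	)

	// newFakeDynamicClientWithReplication returns a fake dynamic client that
	// already contains a RedisReplication, so the Get inside
	// getRedisReplicationMasterIP can succeed in a unit test.
	func newFakeDynamicClientWithReplication(name, namespace string) dynamic.Interface {
		replication := &unstructured.Unstructured{Object: map[string]interface{}{
			"apiVersion": "redis.redis.opstreelabs.in/v1beta2",
			"kind":       "RedisReplication",
			"metadata": map[string]interface{}{
				"name":      name,
				"namespace": namespace,
			},
		}}
		// NewSimpleDynamicClient registers the object's kind on the fly and
		// serves it under the same GroupVersionResource the production code queries.
		return fake.NewSimpleDynamicClient(runtime.NewScheme(), replication)
	}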

k8sutils/status.go (10 changes: 3 additions & 7 deletions)
@@ -10,6 +10,7 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
)

// statusLogger will generate logging interface for status
@@ -19,18 +20,13 @@ func statusLogger(namespace string, name string) logr.Logger {
}

// UpdateRedisClusterStatus will update the status of the RedisCluster
-func UpdateRedisClusterStatus(cr *redisv1beta2.RedisCluster, status status.RedisClusterState, resaon string, readyLeaderReplicas, readyFollowerReplicas int32) error {
+func UpdateRedisClusterStatus(cr *redisv1beta2.RedisCluster, status status.RedisClusterState, resaon string, readyLeaderReplicas, readyFollowerReplicas int32, dcl dynamic.Interface) error {
logger := statusLogger(cr.Namespace, cr.Name)
cr.Status.State = status
cr.Status.Reason = resaon
cr.Status.ReadyLeaderReplicas = readyLeaderReplicas
cr.Status.ReadyFollowerReplicas = readyFollowerReplicas

-client, err := GenerateK8sDynamicClient(GenerateK8sConfig)
-if err != nil {
-logger.Error(err, "Failed to generate k8s dynamic client")
-return err
-}
gvr := schema.GroupVersionResource{
Group: "redis.redis.opstreelabs.in",
Version: "v1beta2",
@@ -43,7 +39,7 @@ func UpdateRedisClusterStatus(cr *redisv1beta2.RedisCluster, status status.Redis
}
unstructuredRedisCluster := &unstructured.Unstructured{Object: unstructuredObj}

-_, err = client.Resource(gvr).Namespace(cr.Namespace).UpdateStatus(context.TODO(), unstructuredRedisCluster, metav1.UpdateOptions{})
+_, err = dcl.Resource(gvr).Namespace(cr.Namespace).UpdateStatus(context.TODO(), unstructuredRedisCluster, metav1.UpdateOptions{})
if err != nil {
logger.Error(err, "Failed to update status")
return err
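With the client injected, UpdateRedisClusterStatus itself becomes unit-testable against a fake, with no GenerateK8sConfig call anywhere in the path. A hypothetical test sketch follows; the status import path, object names, and assertions are illustrative, not part of this commit. The fake must be preloaded with the cluster object, because UpdateStatus fails for objects missing from its tracker.

	import (
		"testing"

		"github.com/OT-CONTAINER-KIT/redis-operator/api/status" // import path assumed, not shown in this diff
		redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2"
		"github.com/stretchr/testify/assert"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
		"k8s.io/apimachinery/pkg/runtime"
		"k8s.io/client-go/dynamic/fake"
	)

	func TestUpdateRedisClusterStatus(t *testing.T) {
		// Preload the fake so the UpdateStatus call has an object to replace.
		existing := &unstructured.Unstructured{Object: map[string]interface{}{
			"apiVersion": "redis.redis.opstreelabs.in/v1beta2",
			"kind":       "RedisCluster",
			"metadata":   map[string]interface{}{"name": "my-cluster", "namespace": "default"},
		}}
		dcl := fake.NewSimpleDynamicClient(runtime.NewScheme(), existing)

		cr := &redisv1beta2.RedisCluster{
			ObjectMeta: metav1.ObjectMeta{Name: "my-cluster", Namespace: "default"},
		}
		err := UpdateRedisClusterStatus(cr, status.RedisClusterReady, status.ReadyClusterReason, 3, 3, dcl)
		assert.NoError(t, err)
		assert.Equal(t, status.RedisClusterReady, cr.Status.State)
	}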
