diff --git a/api/v1alpha1/gitopscluster_types.go b/api/v1alpha1/gitopscluster_types.go
index 026cefa..6906f5b 100644
--- a/api/v1alpha1/gitopscluster_types.go
+++ b/api/v1alpha1/gitopscluster_types.go
@@ -17,11 +17,15 @@ limitations under the License.
 package v1alpha1
 
 import (
+    "time"
+
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
     "github.com/fluxcd/pkg/apis/meta"
 )
 
+const defaultWaitDuration = time.Second * 60
+
 // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
 // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
@@ -33,6 +37,11 @@ type GitopsClusterSpec struct {
     // CAPIClusterRef specifies the CAPI Cluster.
     // +optional
     CAPIClusterRef *meta.LocalObjectReference `json:"capiClusterRef,omitempty"`
+    // When checking for readiness, this is the time to wait before
+    // checking again.
+    //+kubebuilder:default:60s
+    //+optional
+    ClusterReadinessBackoff *metav1.Duration `json:"clusterReadinessBackoff,omitempty"`
 }
 
 // GitopsClusterStatus defines the observed state of GitopsCluster
@@ -67,6 +76,15 @@ type GitopsCluster struct {
     Status GitopsClusterStatus `json:"status,omitempty"`
 }
 
+// ClusterReadinessRequeue returns the configured ClusterReadinessBackoff or a default
+// value if not configured.
+func (c GitopsCluster) ClusterReadinessRequeue() time.Duration {
+    if v := c.Spec.ClusterReadinessBackoff; v != nil {
+        return v.Duration
+    }
+    return defaultWaitDuration
+}
+
 // +kubebuilder:object:root=true
 
 // GitopsClusterList contains a list of GitopsCluster
diff --git a/api/v1alpha1/gitopscluster_types_test.go b/api/v1alpha1/gitopscluster_types_test.go
new file mode 100644
index 0000000..d38844b
--- /dev/null
+++ b/api/v1alpha1/gitopscluster_types_test.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2022.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+    "testing"
+    "time"
+
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func TestControl(t *testing.T) {
+    c := GitopsCluster{}
+
+    if v := c.ClusterReadinessRequeue(); v != defaultWaitDuration {
+        t.Fatalf("ClusterReadinessRequeue() got %v, want %v", v, defaultWaitDuration)
+    }
+
+    want := time.Second * 20
+    c.Spec.ClusterReadinessBackoff = &metav1.Duration{Duration: want}
+    if v := c.ClusterReadinessRequeue(); v != want {
+        t.Fatalf("ClusterReadinessRequeue() got %v, want %v", v, want)
+    }
+}
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index b460df2..c1d7afd 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -99,6 +99,11 @@ func (in *GitopsClusterSpec) DeepCopyInto(out *GitopsClusterSpec) {
         *out = new(meta.LocalObjectReference)
         **out = **in
     }
+    if in.ClusterReadinessBackoff != nil {
+        in, out := &in.ClusterReadinessBackoff, &out.ClusterReadinessBackoff
+        *out = new(v1.Duration)
+        **out = **in
+    }
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitopsClusterSpec.
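Reviewer note, not part of the patch: a minimal sketch of how the new field and accessor behave. The import path for the API package is assumed from the module path used by the tests below, and the 30-second value is purely illustrative; with the field unset, the accessor falls back to the 60-second defaultWaitDuration.

    package main

    import (
        "fmt"
        "time"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

        "github.com/weaveworks/cluster-controller/api/v1alpha1"
    )

    func main() {
        cluster := v1alpha1.GitopsCluster{}
        fmt.Println(cluster.ClusterReadinessRequeue()) // 1m0s: falls back to defaultWaitDuration

        cluster.Spec.ClusterReadinessBackoff = &metav1.Duration{Duration: 30 * time.Second}
        fmt.Println(cluster.ClusterReadinessRequeue()) // 30s: used as the RequeueAfter in the reconciler change below
    }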
diff --git a/config/crd/bases/gitops.weave.works_gitopsclusters.yaml b/config/crd/bases/gitops.weave.works_gitopsclusters.yaml
index 39264fb..24cd53c 100644
--- a/config/crd/bases/gitops.weave.works_gitopsclusters.yaml
+++ b/config/crd/bases/gitops.weave.works_gitopsclusters.yaml
@@ -54,6 +54,10 @@ spec:
                 required:
                 - name
                 type: object
+              clusterReadinessBackoff:
+                description: When checking for readiness, this is the time to wait
+                  before checking again.
+                type: string
               secretRef:
                 description: SecretRef specifies the Secret containing the kubeconfig
                   for a cluster.
diff --git a/controllers/cluster.go b/controllers/cluster.go
new file mode 100644
index 0000000..54dd85f
--- /dev/null
+++ b/controllers/cluster.go
@@ -0,0 +1,70 @@
+package controllers
+
+import (
+    "context"
+    "fmt"
+
+    "github.com/go-logr/logr"
+    corev1 "k8s.io/api/core/v1"
+    "sigs.k8s.io/controller-runtime/pkg/client"
+    "sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+const (
+    deprecatedControlPlaneLabel = "node-role.kubernetes.io/master"
+    controlPlaneLabel           = "node-role.kubernetes.io/control-plane"
+)
+
+// IsControlPlaneReady takes a client connected to a cluster and reports whether or
+// not the control-plane for the cluster is "ready".
+func IsControlPlaneReady(ctx context.Context, cl client.Client) (bool, error) {
+    logger := log.FromContext(ctx)
+    readiness := []bool{}
+    readyNodes, err := listReadyNodesWithLabel(ctx, logger, cl, controlPlaneLabel)
+    if err != nil {
+        return false, err
+    }
+    readiness = append(readiness, readyNodes...)
+
+    if len(readyNodes) == 0 {
+        readyNodes, err := listReadyNodesWithLabel(ctx, logger, cl, deprecatedControlPlaneLabel)
+        if err != nil {
+            return false, err
+        }
+        readiness = append(readiness, readyNodes...)
+    }
+
+    isReady := func(bools []bool) bool {
+        for _, v := range bools {
+            if !v {
+                return false
+            }
+        }
+        return true
+    }
+    logger.Info("readiness", "len", len(readiness), "is-ready", isReady(readiness))
+
+    // If we have no statuses, then we really don't know if we're ready or not.
+    return (len(readiness) > 0 && isReady(readiness)), nil
+}
+
+func listReadyNodesWithLabel(ctx context.Context, logger logr.Logger, cl client.Client, label string) ([]bool, error) {
+    nodes := &corev1.NodeList{}
+    // https://github.com/kubernetes/enhancements/blob/master/keps/sig-cluster-lifecycle/kubeadm/2067-rename-master-label-taint/README.md#design-details
+    err := cl.List(ctx, nodes, client.HasLabels([]string{label}))
+    if err != nil {
+        return nil, fmt.Errorf("failed to query cluster node list: %w", err)
+    }
+    logger.Info("listed nodes with control plane label", "label", label, "count", len(nodes.Items))
+
+    readiness := []bool{}
+    for _, node := range nodes.Items {
+        for _, c := range node.Status.Conditions {
+            switch c.Type {
+            case corev1.NodeReady:
+                readiness = append(readiness, c.Status == corev1.ConditionTrue)
+            }
+        }
+    }
+    return readiness, nil
+}
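Reviewer note, not part of the patch: a rough usage sketch of IsControlPlaneReady, assuming the caller already holds a client for the target cluster. waitForControlPlane and its backoff parameter are hypothetical names, but the requeue pattern mirrors the reconciler change further down.

    package example

    import (
        "context"
        "time"

        "github.com/weaveworks/cluster-controller/controllers"
        ctrl "sigs.k8s.io/controller-runtime"
        "sigs.k8s.io/controller-runtime/pkg/client"
    )

    // waitForControlPlane gates further work on control-plane readiness and
    // requeues with the supplied backoff when the cluster is not ready yet.
    func waitForControlPlane(ctx context.Context, remote client.Client, backoff time.Duration) (ctrl.Result, error) {
        ready, err := controllers.IsControlPlaneReady(ctx, remote)
        if err != nil {
            return ctrl.Result{}, err
        }
        if !ready {
            // Either no labelled control-plane nodes were found, or one of them
            // reported a Ready condition that is not True.
            return ctrl.Result{RequeueAfter: backoff}, nil
        }
        return ctrl.Result{}, nil
    }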
diff --git a/controllers/cluster_test.go b/controllers/cluster_test.go
new file mode 100644
index 0000000..e58e4ad
--- /dev/null
+++ b/controllers/cluster_test.go
@@ -0,0 +1,96 @@
+package controllers_test
+
+import (
+    "context"
+    "testing"
+
+    "github.com/weaveworks/cluster-controller/controllers"
+    corev1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+    "sigs.k8s.io/controller-runtime/pkg/client"
+    "sigs.k8s.io/controller-runtime/pkg/client/fake"
+)
+
+func TestIsControlPlaneReady(t *testing.T) {
+    controlPlaneLabels := map[string]string{
+        "node-role.kubernetes.io/master":        "",
+        "node-role.kubernetes.io/control-plane": "",
+        "beta.kubernetes.io/arch":               "amd64",
+        "beta.kubernetes.io/os":                 "linux",
+        "kubernetes.io/arch":                    "amd64",
+        "kubernetes.io/hostname":                "kind-control-plane",
+        "kubernetes.io/os":                      "linux",
+    }
+
+    nodeTests := []struct {
+        name       string
+        labels     map[string]string
+        conditions []corev1.NodeCondition
+        wantReady  bool
+    }{
+        {
+            name:   "control plane not ready",
+            labels: controlPlaneLabels,
+            conditions: makeConditions(
+                corev1.NodeCondition{Type: corev1.NodeReady, Status: corev1.ConditionFalse, LastHeartbeatTime: metav1.Now(), LastTransitionTime: metav1.Now(), Reason: "KubeletNotReady", Message: "container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"},
+            ),
+        },
+        {
+            name:   "control plane ready",
+            labels: controlPlaneLabels,
+            conditions: makeConditions(
+                corev1.NodeCondition{Type: "NetworkUnavailable", Status: "False", LastHeartbeatTime: metav1.Now(), LastTransitionTime: metav1.Now(), Reason: "CalicoIsUp", Message: "Calico is running on this node"},
+                corev1.NodeCondition{Type: "Ready", Status: "True", LastHeartbeatTime: metav1.Now(), LastTransitionTime: metav1.Now(), Reason: "KubeletReady", Message: "kubelet is posting ready status"},
+            ),
+            wantReady: true,
+        },
+        {
+            name:   "no control plane",
+            labels: map[string]string{},
+            conditions: makeConditions(
+                corev1.NodeCondition{Type: corev1.NodeReady, Status: corev1.ConditionFalse, LastHeartbeatTime: metav1.Now(), LastTransitionTime: metav1.Now(), Reason: "KubeletNotReady", Message: "container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"},
+            ),
+        },
+    }
+
+    for _, tt := range nodeTests {
+        t.Run(tt.name, func(t *testing.T) {
+            cl := makeClient(makeNode(tt.labels, tt.conditions...))
+
+            ready, err := controllers.IsControlPlaneReady(context.TODO(), cl)
+            if err != nil {
+                t.Fatal(err)
+            }
+
+            if ready != tt.wantReady {
+                t.Fatalf("IsControlPlaneReady() got %v, want %v", ready, tt.wantReady)
+            }
+        })
+    }
+}
+
+func makeNode(labels map[string]string, conds ...corev1.NodeCondition) *corev1.Node {
+    return &corev1.Node{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:   "test-control-plane",
+            Labels: labels,
+        },
+        Spec: corev1.NodeSpec{},
+        Status: corev1.NodeStatus{
+            Conditions: conds,
+        },
+    }
+}
+
+func makeClient(objs ...runtime.Object) client.Client {
+    return fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()
+}
+
+func makeConditions(conds ...corev1.NodeCondition) []corev1.NodeCondition {
+    base := []corev1.NodeCondition{
+        corev1.NodeCondition{Type: corev1.NodeMemoryPressure, Status: corev1.ConditionFalse, LastHeartbeatTime: metav1.Now(), LastTransitionTime: metav1.Now(), Reason: "KubeletHasSufficientMemory", Message: "kubelet has sufficient memory available"},
+        corev1.NodeCondition{Type: corev1.NodeDiskPressure, Status: corev1.ConditionFalse, LastHeartbeatTime: metav1.Now(), LastTransitionTime: metav1.Now(), Reason: "KubeletHasNoDiskPressure", Message: "kubelet has no disk pressure"},
+        corev1.NodeCondition{Type: corev1.NodePIDPressure, Status: corev1.ConditionFalse, LastHeartbeatTime: metav1.Now(), LastTransitionTime: metav1.Now(), Reason: "KubeletHasSufficientPID", Message: "kubelet has sufficient PID available"},
+    }
+    return append(conds, base...)
+}
+ t.Fatalf("IsControlPlaneReady() got %v, want %v", ready, tt.wantReady) + } + }) + } +} + +func makeNode(labels map[string]string, conds ...corev1.NodeCondition) *corev1.Node { + return &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-control-plane", + Labels: labels, + }, + Spec: corev1.NodeSpec{}, + Status: corev1.NodeStatus{ + Conditions: conds, + }, + } +} + +func makeClient(objs ...runtime.Object) client.Client { + return fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() +} + +func makeConditions(conds ...corev1.NodeCondition) []corev1.NodeCondition { + base := []corev1.NodeCondition{ + corev1.NodeCondition{Type: corev1.NodeMemoryPressure, Status: corev1.ConditionFalse, LastHeartbeatTime: metav1.Now(), LastTransitionTime: metav1.Now(), Reason: "KubeletHasSufficientMemory", Message: "kubelet has sufficient memory available"}, + corev1.NodeCondition{Type: corev1.NodeDiskPressure, Status: corev1.ConditionFalse, LastHeartbeatTime: metav1.Now(), LastTransitionTime: metav1.Now(), Reason: "KubeletHasNoDiskPressure", Message: "kubelet has no disk pressure"}, + corev1.NodeCondition{Type: corev1.NodePIDPressure, Status: corev1.ConditionFalse, LastHeartbeatTime: metav1.Now(), LastTransitionTime: metav1.Now(), Reason: "KubeletHasSufficientPID", Message: "kubelet has sufficient PID available"}, + } + return append(conds, base...) +} diff --git a/controllers/gitopscluster_controller.go b/controllers/gitopscluster_controller.go index cc0e9d2..5b24bb3 100644 --- a/controllers/gitopscluster_controller.go +++ b/controllers/gitopscluster_controller.go @@ -27,9 +27,11 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/clientcmd" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/source" @@ -53,7 +55,18 @@ const ( // GitopsClusterReconciler reconciles a GitopsCluster object type GitopsClusterReconciler struct { client.Client - Scheme *runtime.Scheme + Scheme *runtime.Scheme + ConfigParser func(b []byte) (client.Client, error) +} + +// NewGitopsClusterReconciler creates and returns a configured +// reconciler ready for use. +func NewGitopsClusterReconciler(c client.Client, s *runtime.Scheme) *GitopsClusterReconciler { + return &GitopsClusterReconciler{ + Client: c, + Scheme: s, + ConfigParser: kubeConfigBytesToClient, + } } // +kubebuilder:rbac:groups=gitops.weave.works,resources=gitopsclusters,verbs=get;list;watch;create;update;patch;delete @@ -86,11 +99,15 @@ func (r *GitopsClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reques // TODO: this could _possibly_ be controllable by the // `GitopsCluster` itself. 
log.Info("waiting for cluster secret to be available") + conditions.MarkFalse(cluster, meta.ReadyCondition, gitopsv1alpha1.WaitingForSecretReason, "") + if err := r.Status().Update(ctx, cluster); err != nil { + log.Error(err, "failed to update Cluster status") + return ctrl.Result{}, err + } return ctrl.Result{RequeueAfter: MissingSecretRequeueTime}, nil } e := fmt.Errorf("failed to get secret %q: %w", name, err) conditions.MarkFalse(cluster, meta.ReadyCondition, gitopsv1alpha1.WaitingForSecretReason, e.Error()) - if err := r.Status().Update(ctx, cluster); err != nil { log.Error(err, "failed to update Cluster status") return ctrl.Result{}, err @@ -116,7 +133,6 @@ func (r *GitopsClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reques if err := r.Get(ctx, name, &capiCluster); err != nil { e := fmt.Errorf("failed to get CAPI cluster %q: %w", name, err) conditions.MarkFalse(cluster, meta.ReadyCondition, gitopsv1alpha1.WaitingForCAPIClusterReason, e.Error()) - if err := r.Status().Update(ctx, cluster); err != nil { log.Error(err, "failed to update Cluster status") return ctrl.Result{}, err @@ -126,6 +142,24 @@ func (r *GitopsClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reques } log.Info("CAPI Cluster found", "CAPI cluster", name) + + clusterName := types.NamespacedName{Name: cluster.GetName(), Namespace: cluster.GetNamespace()} + clusterClient, err := r.clientForCluster(ctx, *cluster) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to create client of cluster %s: %w", clusterName, err) + } + + ready, err := IsControlPlaneReady(ctx, clusterClient) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to check readiness of cluster %s: %w", clusterName, err) + } + + if !ready { + log.Info("waiting for control plane to be ready", "cluster", clusterName) + + return ctrl.Result{RequeueAfter: cluster.ClusterReadinessRequeue()}, nil + } + conditions.MarkTrue(cluster, meta.ReadyCondition, gitopsv1alpha1.CAPIClusterFoundReason, "") if err := r.Status().Update(ctx, cluster); err != nil { log.Error(err, "failed to update Cluster status") @@ -224,3 +258,59 @@ func (r *GitopsClusterReconciler) requestsForCAPIClusterChange(o client.Object) } return reqs } + +func (r *GitopsClusterReconciler) clientForCluster(ctx context.Context, cluster gitopsv1alpha1.GitopsCluster) (client.Client, error) { + kubeConfigBytes, err := r.getKubeConfig(ctx, cluster) + if err != nil { + return nil, err + } + + client, err := r.ConfigParser(kubeConfigBytes) + if err != nil { + return nil, fmt.Errorf("getting client for cluster %s: %w", cluster.Name, err) + } + return client, nil +} + +func (r *GitopsClusterReconciler) getKubeConfig(ctx context.Context, cluster gitopsv1alpha1.GitopsCluster) ([]byte, error) { + secretName := types.NamespacedName{ + Namespace: cluster.GetNamespace(), + Name: cluster.Spec.CAPIClusterRef.Name + "-kubeconfig", + } + + var secret corev1.Secret + if err := r.Client.Get(ctx, secretName, &secret); err != nil { + return nil, fmt.Errorf("unable to read KubeConfig secret %q error: %w", secretName, err) + } + + var kubeConfig []byte + for k := range secret.Data { + if k == "value" || k == "value.yaml" { + kubeConfig = secret.Data[k] + break + } + } + + if len(kubeConfig) == 0 { + return nil, fmt.Errorf("KubeConfig secret %q doesn't contain a 'value' key ", secretName) + } + + return kubeConfig, nil +} + +func kubeConfigBytesToClient(b []byte) (client.Client, error) { + restConfig, err := clientcmd.RESTConfigFromKubeConfig(b) + if err != nil { + return nil, 
fmt.Errorf("failed to parse KubeConfig from secret: %w", err) + } + restMapper, err := apiutil.NewDynamicRESTMapper(restConfig) + if err != nil { + return nil, fmt.Errorf("failed to create RESTMapper from config: %w", err) + } + + client, err := client.New(restConfig, client.Options{Mapper: restMapper}) + if err != nil { + return nil, fmt.Errorf("failed to create a client from config: %w", err) + } + return client, nil +} diff --git a/controllers/gitopscluster_controller_test.go b/controllers/gitopscluster_controller_test.go index b14678c..cc77f4e 100644 --- a/controllers/gitopscluster_controller_test.go +++ b/controllers/gitopscluster_controller_test.go @@ -27,32 +27,34 @@ const ( func TestReconcile(t *testing.T) { tests := []struct { - name string - state []runtime.Object - obj types.NamespacedName - requeueAfter time.Duration - errString string + name string + runtimeObjects []runtime.Object + gitopsCluster *gitopsv1alpha1.GitopsCluster + expectedCondition *metav1.Condition + obj types.NamespacedName + requeueAfter time.Duration + errString string }{ { name: "secret does not exist", - state: []runtime.Object{ - makeTestCluster(func(c *gitopsv1alpha1.GitopsCluster) { - c.Spec.SecretRef = &meta.LocalObjectReference{ - Name: "missing", - } - }), - }, - obj: types.NamespacedName{Namespace: testNamespace, Name: testName}, - requeueAfter: controllers.MissingSecretRequeueTime, + gitopsCluster: makeTestCluster(func(c *gitopsv1alpha1.GitopsCluster) { + c.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "missing", + } + }), + expectedCondition: &metav1.Condition{Type: "Ready", Status: "False"}, + obj: types.NamespacedName{Namespace: testNamespace, Name: testName}, + requeueAfter: controllers.MissingSecretRequeueTime, }, { name: "secret exists", - state: []runtime.Object{ - makeTestCluster(func(c *gitopsv1alpha1.GitopsCluster) { - c.Spec.SecretRef = &meta.LocalObjectReference{ - Name: "dev", - } - }), + gitopsCluster: makeTestCluster(func(c *gitopsv1alpha1.GitopsCluster) { + c.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dev", + } + }), + expectedCondition: &metav1.Condition{Type: "Ready", Status: "True"}, + runtimeObjects: []runtime.Object{ makeTestSecret(types.NamespacedName{ Name: "dev", Namespace: testNamespace, @@ -61,29 +63,74 @@ func TestReconcile(t *testing.T) { obj: types.NamespacedName{Namespace: testNamespace, Name: testName}, }, { - name: "CAPI cluster does not exist", - state: []runtime.Object{ - makeTestCluster(func(c *gitopsv1alpha1.GitopsCluster) { - c.Spec.CAPIClusterRef = &meta.LocalObjectReference{ - Name: "missing", - } + name: "CAPI cluster does not exist", + expectedCondition: &metav1.Condition{Type: "Ready", Status: "False"}, + gitopsCluster: makeTestCluster(func(c *gitopsv1alpha1.GitopsCluster) { + c.Spec.CAPIClusterRef = &meta.LocalObjectReference{ + Name: "missing", + } + }), + obj: types.NamespacedName{Namespace: testNamespace, Name: testName}, + errString: "failed to get CAPI cluster.*missing.*not found", + }, + { + name: "CAPI cluster exists but no secret", + gitopsCluster: makeTestCluster(func(c *gitopsv1alpha1.GitopsCluster) { + c.Spec.CAPIClusterRef = &meta.LocalObjectReference{ + Name: "dev", + } + }), + runtimeObjects: []runtime.Object{ + makeTestCAPICluster(types.NamespacedName{ + Name: "dev", + Namespace: testNamespace, }), }, obj: types.NamespacedName{Namespace: testNamespace, Name: testName}, - errString: "failed to get CAPI cluster.*missing.*not found", + errString: `failed to create client of cluster testing/test-cluster: unable to read 
-            name: "CAPI cluster exists",
-            state: []runtime.Object{
-                makeTestCluster(func(c *gitopsv1alpha1.GitopsCluster) {
-                    c.Spec.CAPIClusterRef = &meta.LocalObjectReference{
-                        Name: "dev",
-                    }
+            name: "Control plane not ready",
+            gitopsCluster: makeTestCluster(func(c *gitopsv1alpha1.GitopsCluster) {
+                c.Spec.CAPIClusterRef = &meta.LocalObjectReference{
+                    Name: "dev",
+                }
+            }),
+            runtimeObjects: []runtime.Object{
+                makeTestCAPICluster(types.NamespacedName{
+                    Name:      "dev",
+                    Namespace: testNamespace,
                 }),
+                makeTestSecret(types.NamespacedName{
+                    Name:      "dev-kubeconfig",
+                    Namespace: testNamespace,
+                }, map[string][]byte{"value": []byte("foo")}),
+            },
+            obj:          types.NamespacedName{Namespace: testNamespace, Name: testName},
+            requeueAfter: time.Minute * 1,
+        },
+        {
+            name: "Control plane ready",
+            gitopsCluster: makeTestCluster(func(c *gitopsv1alpha1.GitopsCluster) {
+                c.Spec.CAPIClusterRef = &meta.LocalObjectReference{
+                    Name: "dev",
+                }
+            }),
+            expectedCondition: &metav1.Condition{Type: "Ready", Status: "True"},
+            runtimeObjects: []runtime.Object{
+                makeNode(map[string]string{
+                    "node-role.kubernetes.io/master": "",
+                }, corev1.NodeCondition{Type: "Ready", Status: "True", LastHeartbeatTime: metav1.Now(),
+                    LastTransitionTime: metav1.Now(), Reason: "KubeletReady",
+                    Message: "kubelet is posting ready status"}),
                 makeTestCAPICluster(types.NamespacedName{
                     Name:      "dev",
                     Namespace: testNamespace,
                 }),
+                makeTestSecret(types.NamespacedName{
+                    Name:      "dev-kubeconfig",
+                    Namespace: testNamespace,
+                }, map[string][]byte{"value": []byte("foo")}),
             },
             obj: types.NamespacedName{Namespace: testNamespace, Name: testName},
         },
@@ -91,7 +138,11 @@ func TestReconcile(t *testing.T) {
     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
-            r := makeTestReconciler(t, tt.state...)
+            s, tc := makeTestClientAndScheme(t, append(tt.runtimeObjects, tt.gitopsCluster)...)
+            r := makeTestReconciler(t, tc, s)
+            r.ConfigParser = func(b []byte) (client.Client, error) {
+                return r.Client, nil
+            }
 
             result, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: tt.obj})
 
@@ -99,18 +150,43 @@ func TestReconcile(t *testing.T) {
                 t.Fatalf("Reconcile() RequeueAfter got %v, want %v", result.RequeueAfter, tt.requeueAfter)
             }
             assertErrorMatch(t, tt.errString, err)
+
+            assertCondition(t, tc, tt.expectedCondition)
         })
     }
 }
 
-func makeTestReconciler(t *testing.T, objs ...runtime.Object) controllers.GitopsClusterReconciler {
-    s, tc := makeTestClientAndScheme(t, objs...)
-    return controllers.GitopsClusterReconciler{
-        Client: tc,
-        Scheme: s,
+func assertCondition(t *testing.T, c client.Client, expectedCondition *metav1.Condition) {
+    found := false
+    if expectedCondition != nil {
+        name := types.NamespacedName{
+            Namespace: testNamespace,
+            Name:      testName,
+        }
+        var gc gitopsv1alpha1.GitopsCluster
+        if err := c.Get(context.Background(), name, &gc); err != nil {
+            t.Fatalf("couldn't find the cluster: %v", err)
+        }
+        for _, c := range gc.Status.Conditions {
+            if c.Type == expectedCondition.Type {
+                found = true
+            }
+            got := c.Status
+            want := expectedCondition.Status
+            if got != want {
+                t.Fatalf("status did not match, got %s, want %s", got, want)
+            }
+        }
+        if !found {
+            t.Fatalf("did not find condition %s", expectedCondition.Type)
+        }
     }
 }
 
+func makeTestReconciler(t *testing.T, c client.Client, s *runtime.Scheme) *controllers.GitopsClusterReconciler {
+    return controllers.NewGitopsClusterReconciler(c, s)
+}
+
 func makeTestClientAndScheme(t *testing.T, objs ...runtime.Object) (*runtime.Scheme, client.Client) {
     t.Helper()
     s := runtime.NewScheme()
diff --git a/main.go b/main.go
index a23d21a..843866a 100644
--- a/main.go
+++ b/main.go
@@ -78,10 +78,7 @@ func main() {
         os.Exit(1)
     }
 
-    if err = (&controllers.GitopsClusterReconciler{
-        Client: mgr.GetClient(),
-        Scheme: mgr.GetScheme(),
-    }).SetupWithManager(mgr); err != nil {
+    if err = controllers.NewGitopsClusterReconciler(mgr.GetClient(), mgr.GetScheme()).SetupWithManager(mgr); err != nil {
         setupLog.Error(err, "unable to create controller", "controller", "GitopsCluster")
         os.Exit(1)
     }