From 760138ea0471269ba210b9347fd22522b36ad650 Mon Sep 17 00:00:00 2001 From: nitintecg Date: Tue, 3 Sep 2024 02:14:45 +0530 Subject: [PATCH] moved k8sclient to cyclops-ctrl pkg package --- cyclops-ctrl/cmd/main/main.go | 2 +- cyclops-ctrl/internal/controller/cluster.go | 2 +- cyclops-ctrl/internal/controller/modules.go | 2 +- cyclops-ctrl/internal/controller/templates.go | 2 +- cyclops-ctrl/internal/handler/handler.go | 2 +- .../modulecontroller/module_controller.go | 2 +- .../internal/template/render/render.go | 2 +- cyclops-ctrl/pkg/cluster/k8sclient/client.go | 1079 +++++++++++++++++ cyclops-ctrl/pkg/cluster/k8sclient/modules.go | 782 ++++++++++++ .../cluster/k8sclient/templateauthrules.go | 28 + .../pkg/cluster/k8sclient/templatestore.go | 32 + cyctl/go.mod | 28 +- cyctl/go.sum | 70 +- 13 files changed, 2001 insertions(+), 32 deletions(-) create mode 100644 cyclops-ctrl/pkg/cluster/k8sclient/client.go create mode 100644 cyclops-ctrl/pkg/cluster/k8sclient/modules.go create mode 100644 cyclops-ctrl/pkg/cluster/k8sclient/templateauthrules.go create mode 100644 cyclops-ctrl/pkg/cluster/k8sclient/templatestore.go diff --git a/cyclops-ctrl/cmd/main/main.go b/cyclops-ctrl/cmd/main/main.go index 1c8a0fbd7..b669cc601 100644 --- a/cyclops-ctrl/cmd/main/main.go +++ b/cyclops-ctrl/cmd/main/main.go @@ -20,7 +20,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook" "github.com/cyclops-ui/cyclops/cyclops-ctrl/internal/auth" - "github.com/cyclops-ui/cyclops/cyclops-ctrl/internal/cluster/k8sclient" + "github.com/cyclops-ui/cyclops/cyclops-ctrl/pkg/cluster/k8sclient" "github.com/cyclops-ui/cyclops/cyclops-ctrl/internal/handler" "github.com/cyclops-ui/cyclops/cyclops-ctrl/internal/modulecontroller" "github.com/cyclops-ui/cyclops/cyclops-ctrl/internal/prometheus" diff --git a/cyclops-ctrl/internal/controller/cluster.go b/cyclops-ctrl/internal/controller/cluster.go index 0adbdfb30..55e3ade4d 100644 --- a/cyclops-ctrl/internal/controller/cluster.go +++ b/cyclops-ctrl/internal/controller/cluster.go @@ -7,7 +7,7 @@ import ( "github.com/gin-gonic/gin" "k8s.io/apimachinery/pkg/api/errors" - "github.com/cyclops-ui/cyclops/cyclops-ctrl/internal/cluster/k8sclient" + "github.com/cyclops-ui/cyclops/cyclops-ctrl/pkg/cluster/k8sclient" "github.com/cyclops-ui/cyclops/cyclops-ctrl/internal/mapper" "github.com/cyclops-ui/cyclops/cyclops-ctrl/internal/models/dto" ) diff --git a/cyclops-ctrl/internal/controller/modules.go b/cyclops-ctrl/internal/controller/modules.go index bb9b8ae40..db3d2310b 100644 --- a/cyclops-ctrl/internal/controller/modules.go +++ b/cyclops-ctrl/internal/controller/modules.go @@ -10,7 +10,7 @@ import ( "github.com/gin-gonic/gin" "github.com/cyclops-ui/cyclops/cyclops-ctrl/api/v1alpha1" - "github.com/cyclops-ui/cyclops/cyclops-ctrl/internal/cluster/k8sclient" + "github.com/cyclops-ui/cyclops/cyclops-ctrl/pkg/cluster/k8sclient" "github.com/cyclops-ui/cyclops/cyclops-ctrl/internal/mapper" "github.com/cyclops-ui/cyclops/cyclops-ctrl/internal/models/dto" "github.com/cyclops-ui/cyclops/cyclops-ctrl/internal/prometheus" diff --git a/cyclops-ctrl/internal/controller/templates.go b/cyclops-ctrl/internal/controller/templates.go index 94688af09..e350ce2e1 100644 --- a/cyclops-ctrl/internal/controller/templates.go +++ b/cyclops-ctrl/internal/controller/templates.go @@ -9,7 +9,7 @@ import ( "github.com/gin-gonic/gin" json "github.com/json-iterator/go" - "github.com/cyclops-ui/cyclops/cyclops-ctrl/internal/cluster/k8sclient" + "github.com/cyclops-ui/cyclops/cyclops-ctrl/pkg/cluster/k8sclient" 
"github.com/cyclops-ui/cyclops/cyclops-ctrl/internal/mapper" "github.com/cyclops-ui/cyclops/cyclops-ctrl/internal/models/dto" "github.com/cyclops-ui/cyclops/cyclops-ctrl/internal/telemetry" diff --git a/cyclops-ctrl/internal/handler/handler.go b/cyclops-ctrl/internal/handler/handler.go index 391feb646..3372ea1e8 100644 --- a/cyclops-ctrl/internal/handler/handler.go +++ b/cyclops-ctrl/internal/handler/handler.go @@ -5,7 +5,7 @@ import ( "github.com/gin-gonic/gin" - "github.com/cyclops-ui/cyclops/cyclops-ctrl/internal/cluster/k8sclient" + "github.com/cyclops-ui/cyclops/cyclops-ctrl/pkg/cluster/k8sclient" "github.com/cyclops-ui/cyclops/cyclops-ctrl/internal/controller" "github.com/cyclops-ui/cyclops/cyclops-ctrl/internal/prometheus" "github.com/cyclops-ui/cyclops/cyclops-ctrl/internal/telemetry" diff --git a/cyclops-ctrl/internal/modulecontroller/module_controller.go b/cyclops-ctrl/internal/modulecontroller/module_controller.go index 1d2f23bb4..040430b4d 100644 --- a/cyclops-ctrl/internal/modulecontroller/module_controller.go +++ b/cyclops-ctrl/internal/modulecontroller/module_controller.go @@ -37,7 +37,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" cyclopsv1alpha1 "github.com/cyclops-ui/cyclops/cyclops-ctrl/api/v1alpha1" - "github.com/cyclops-ui/cyclops/cyclops-ctrl/internal/cluster/k8sclient" + "github.com/cyclops-ui/cyclops/cyclops-ctrl/pkg/cluster/k8sclient" "github.com/cyclops-ui/cyclops/cyclops-ctrl/internal/models" "github.com/cyclops-ui/cyclops/cyclops-ctrl/internal/telemetry" templaterepo "github.com/cyclops-ui/cyclops/cyclops-ctrl/internal/template" diff --git a/cyclops-ctrl/internal/template/render/render.go b/cyclops-ctrl/internal/template/render/render.go index 5840a70eb..054318921 100644 --- a/cyclops-ctrl/internal/template/render/render.go +++ b/cyclops-ctrl/internal/template/render/render.go @@ -10,7 +10,7 @@ import ( "helm.sh/helm/v3/pkg/engine" cyclopsv1alpha1 "github.com/cyclops-ui/cyclops/cyclops-ctrl/api/v1alpha1" - "github.com/cyclops-ui/cyclops/cyclops-ctrl/internal/cluster/k8sclient" + "github.com/cyclops-ui/cyclops/cyclops-ctrl/pkg/cluster/k8sclient" "github.com/cyclops-ui/cyclops/cyclops-ctrl/internal/models" "github.com/cyclops-ui/cyclops/cyclops-ctrl/internal/models/helm" ) diff --git a/cyclops-ctrl/pkg/cluster/k8sclient/client.go b/cyclops-ctrl/pkg/cluster/k8sclient/client.go new file mode 100644 index 000000000..d1589fdb4 --- /dev/null +++ b/cyclops-ctrl/pkg/cluster/k8sclient/client.go @@ -0,0 +1,1079 @@ +package k8sclient + +import ( + "bufio" + "bytes" + "context" + "errors" + "fmt" + "io" + networkingv1 "k8s.io/api/networking/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "os" + "os/exec" + "sort" + "strings" + "time" + + "github.com/cyclops-ui/cyclops/cyclops-ctrl/api/v1alpha1" + + "gopkg.in/yaml.v2" + + v12 "k8s.io/api/apps/v1" + v1 "k8s.io/api/autoscaling/v1" + apiv1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/version" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + ctrl "sigs.k8s.io/controller-runtime" + + "github.com/cyclops-ui/cyclops/cyclops-ctrl/api/v1alpha1/client" + "github.com/cyclops-ui/cyclops/cyclops-ctrl/internal/models/dto" +) + +const ( + kubectl = "kubectl" + cyclopsNamespace = "cyclops" +) + +type KubernetesClient struct { + Dynamic dynamic.Interface + + 
clientset *kubernetes.Clientset + + discovery *discovery.DiscoveryClient + + moduleset *client.CyclopsV1Alpha1Client +} + +func New() (*KubernetesClient, error) { + return createLocalClient() +} + +func createLocalClient() (*KubernetesClient, error) { + config := ctrl.GetConfigOrDie() + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return nil, err + } + + moduleSet, err := client.NewForConfig(config) + if err != nil { + panic(err) + } + + discovery := discovery.NewDiscoveryClientForConfigOrDie(config) + + dynamic, err := dynamic.NewForConfig(config) + if err != nil { + panic(err.Error()) + } + + return &KubernetesClient{ + Dynamic: dynamic, + discovery: discovery, + clientset: clientset, + moduleset: moduleSet, + }, nil +} + +func (k *KubernetesClient) VersionInfo() (*version.Info, error) { + return k.discovery.ServerVersion() +} + +func (k *KubernetesClient) GetDeployment(namespace, name string) (*v12.Deployment, error) { + deploymentClient := k.clientset.AppsV1().Deployments(namespace) + return deploymentClient.Get(context.TODO(), name, metav1.GetOptions{}) +} + +func (k *KubernetesClient) GetDeployments(namespace string) ([]v12.Deployment, error) { + deploymentClient := k.clientset.AppsV1().Deployments(namespace) + deploymentList, err := deploymentClient.List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return nil, err + } + + return deploymentList.Items, err +} + +func (k *KubernetesClient) GetScale(namespace, name string) (*v1.Scale, error) { + deploymentClient := k.clientset.AppsV1().Deployments(namespace) + return deploymentClient.GetScale(context.TODO(), name, metav1.GetOptions{}) +} + +func (k *KubernetesClient) UpdateScale(namespace, name string, sc v1.Scale) error { + deploymentClient := k.clientset.AppsV1().Deployments(namespace) + _, err := deploymentClient.UpdateScale(context.TODO(), name, &sc, metav1.UpdateOptions{}) + return err +} + +func (k *KubernetesClient) Deploy(deploymentSpec *v12.Deployment) error { + namespace := deploymentSpec.Namespace + if len(namespace) == 0 { + namespace = apiv1.NamespaceDefault + } + deploymentClient := k.clientset.AppsV1().Deployments(namespace) + + _, err := deploymentClient.Get(context.TODO(), deploymentSpec.Name, metav1.GetOptions{}) + if err != nil { + if k8serrors.IsNotFound(err) { + _, err := deploymentClient.Create(context.TODO(), deploymentSpec, metav1.CreateOptions{}) + return err + } else { + return err + } + } else { + _, err := deploymentClient.Update(context.TODO(), deploymentSpec, metav1.UpdateOptions{}) + return err + } +} + +func (k *KubernetesClient) DeployService(service *apiv1.Service) error { + namespace := service.Namespace + if len(namespace) == 0 { + namespace = apiv1.NamespaceDefault + } + serviceClient := k.clientset.CoreV1().Services(namespace) + + _, err := serviceClient.Get(context.TODO(), service.Name, metav1.GetOptions{}) + if err != nil { + if k8serrors.IsNotFound(err) { + _, err := serviceClient.Create(context.TODO(), service, metav1.CreateOptions{}) + return err + } else { + return err + } + } else { + _, err := serviceClient.Update(context.TODO(), service, metav1.UpdateOptions{}) + return err + } +} + +func (k *KubernetesClient) GetPods(namespace, name string) ([]apiv1.Pod, error) { + podClient := k.clientset.CoreV1().Pods(namespace) + podList, err := podClient.List(context.TODO(), metav1.ListOptions{ + LabelSelector: fmt.Sprintf("app=%v", name), + }) + + if err != nil { + return nil, err + } + + return podList.Items, err +} + +func (k *KubernetesClient) 
GetPodLogs(namespace, container, name string, numLogs *int64) ([]string, error) { + podLogOptions := apiv1.PodLogOptions{ + Container: container, + TailLines: numLogs, + Timestamps: true, + } + podClient := k.clientset.CoreV1().Pods(namespace).GetLogs(name, &podLogOptions) + stream, err := podClient.Stream(context.Background()) + if err != nil { + return nil, err + } + + defer func(stream io.ReadCloser) { + err := stream.Close() + if err != nil { + return + } + }(stream) + + var logs []string + scanner := bufio.NewScanner(stream) + for scanner.Scan() { + logs = append(logs, scanner.Text()) + } + if err := scanner.Err(); err != nil { + return nil, err + } + + return logs, nil +} + +func (k *KubernetesClient) GetDeploymentLogs(namespace, container, deployment string, numLogs *int64) ([]string, error) { + deploymentClient := k.clientset.AppsV1().Deployments(namespace) + deploymentObj, err := deploymentClient.Get(context.Background(), deployment, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + pods, err := k.clientset.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{ + LabelSelector: labels.Set(deploymentObj.Spec.Selector.MatchLabels).String(), + }) + if err != nil { + return nil, err + } + + var logs []string + for _, pod := range pods.Items { + podLogs, err := k.GetPodLogs(namespace, container, pod.Name, numLogs) + if err != nil { + return nil, err + } + logs = append(logs, podLogs...) + } + sort.Strings(logs) + return logs, nil +} + +func (k *KubernetesClient) GetStatefulSetsLogs(namespace, container, name string, numLogs *int64) ([]string, error) { + statefulsetClient := k.clientset.AppsV1().StatefulSets(namespace) + statefulsetObj, err := statefulsetClient.Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + pods, err := k.clientset.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{ + LabelSelector: labels.Set(statefulsetObj.Spec.Selector.MatchLabels).String(), + }) + if err != nil { + return nil, err + } + + var logs []string + for _, pod := range pods.Items { + podLogs, err := k.GetPodLogs(namespace, container, pod.Name, numLogs) + if err != nil { + return nil, err + } + logs = append(logs, podLogs...) 
+ } + sort.Strings(logs) + return logs, nil +} + +func (k *KubernetesClient) RestartDeployment(name, namespace string) error { + deployment, err := k.clientset.AppsV1().Deployments(namespace).Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + return err + } + + if deployment.Spec.Template.ObjectMeta.Annotations == nil { + deployment.Spec.Template.ObjectMeta.Annotations = make(map[string]string) + } + deployment.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339) + + _, err = k.clientset.AppsV1().Deployments(namespace).Update(context.Background(), deployment, metav1.UpdateOptions{}) + return err +} + +func (k *KubernetesClient) RestartStatefulSet(name, namespace string) error { + statefulset, err := k.clientset.AppsV1().StatefulSets(namespace).Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + return err + } + + if statefulset.Spec.Template.ObjectMeta.Annotations == nil { + statefulset.Spec.Template.ObjectMeta.Annotations = make(map[string]string) + } + statefulset.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339) + + _, err = k.clientset.AppsV1().StatefulSets(namespace).Update(context.Background(), statefulset, metav1.UpdateOptions{}) + return err +} + +func (k *KubernetesClient) RestartDaemonSet(name, namespace string) error { + daemonset, err := k.clientset.AppsV1().DaemonSets(namespace).Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + return err + } + + if daemonset.Spec.Template.ObjectMeta.Annotations == nil { + daemonset.Spec.Template.ObjectMeta.Annotations = make(map[string]string) + } + daemonset.Spec.Template.ObjectMeta.Annotations["kubectl.kubernetes.io/restartedAt"] = time.Now().Format(time.RFC3339) + + _, err = k.clientset.AppsV1().DaemonSets(namespace).Update(context.Background(), daemonset, metav1.UpdateOptions{}) + return err +} + +func (k *KubernetesClient) GetManifest(group, version, kind, name, namespace string, includeManagedFields bool) (string, error) { + apiResourceName, err := k.GVKtoAPIResourceName(schema.GroupVersion{Group: group, Version: version}, kind) + if err != nil { + return "", err + } + + resource, err := k.Dynamic.Resource(schema.GroupVersionResource{ + Group: group, + Version: version, + Resource: apiResourceName, + }).Namespace(namespace).Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + return "", err + } + + if !includeManagedFields { + resource.SetManagedFields(nil) + } + + data, err := yaml.Marshal(resource.Object) + if err != nil { + return "", err + } + + return string(data), nil +} + +func (k *KubernetesClient) Restart(group, version, kind, name, namespace string) error { + switch { + case isDeployment(group, version, kind): + return k.RestartDeployment(name, namespace) + case isDaemonSet(group, version, kind): + return k.RestartDaemonSet(name, namespace) + case isStatefulSet(group, version, kind): + return k.RestartStatefulSet(name, namespace) + } + + return errors.New(fmt.Sprintf("cannot restart: %v/%v %v %v/%v", group, version, kind, namespace, name)) +} + +func (k *KubernetesClient) GetResource(group, version, kind, name, namespace string) (any, error) { + switch { + case isDeployment(group, version, kind): + return k.mapDeployment(group, version, kind, name, namespace) + case isDaemonSet(group, version, kind): + return k.mapDaemonSet(group, version, kind, name, namespace) + case isService(group, version, kind): + return k.mapService(group, 
version, kind, name, namespace) + case isStatefulSet(group, version, kind): + return k.mapStatefulSet(group, version, kind, name, namespace) + case isPod(group, version, kind): + return k.mapPod(group, version, kind, name, namespace) + case isConfigMap(group, version, kind): + return k.mapConfigMap(group, version, kind, name, namespace) + case isPersistentVolume(group, version, kind): + return k.mapPersistentVolumes(group, version, kind, name, namespace) + case isPersistentVolumeClaims(group, version, kind): + return k.mapPersistentVolumeClaims(group, version, kind, name, namespace) + case isSecret(group, version, kind): + return k.mapSecret(group, version, kind, name, namespace) + case isCronJob(group, version, kind): + return k.mapCronJob(group, version, kind, name, namespace) + case isJob(group, version, kind): + return k.mapJob(group, version, kind, name, namespace) + case isRole(group, version, kind): + return k.mapRole(group, version, kind, name, namespace) + case isNetworkPolicy(group, version, kind): + return k.mapNetworkPolicy(group, version, kind, name, namespace) + } + + return nil, nil +} + +func (k *KubernetesClient) GetAllNamespacePods() ([]apiv1.Pod, error) { + podClient := k.clientset.CoreV1().Pods(apiv1.NamespaceDefault) + podList, err := podClient.List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return nil, err + } + + return podList.Items, nil +} + +func (k *KubernetesClient) GetNamespaces() ([]apiv1.Namespace, error) { + namespaceClient := k.clientset.CoreV1().Namespaces() + namespaces, err := namespaceClient.List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return nil, err + } + + return namespaces.Items, err +} + +func (k *KubernetesClient) GetDeploymentsYaml(name string, namespace string) (*bytes.Buffer, error) { + buff := new(bytes.Buffer) + command := exec.Command(kubectl, "get", "deployments", name, "-n", namespace, "-o", "yaml") + command.Stdout = buff + command.Stderr = os.Stderr + return buff, command.Run() +} + +func (k *KubernetesClient) Delete(resource dto.Resource) error { + apiResourceName, err := k.GVKtoAPIResourceName( + schema.GroupVersion{ + Group: resource.GetGroup(), + Version: resource.GetVersion(), + }, resource.GetKind()) + if err != nil { + return err + } + + gvr := schema.GroupVersionResource{ + Group: resource.GetGroup(), + Version: resource.GetVersion(), + Resource: apiResourceName, + } + + return k.Dynamic.Resource(gvr).Namespace(resource.GetNamespace()).Delete( + context.Background(), + resource.GetName(), + metav1.DeleteOptions{}, + ) +} + +func (k *KubernetesClient) CreateDynamic(resource v1alpha1.GroupVersionResource, obj *unstructured.Unstructured) error { + gvr := schema.GroupVersionResource{ + Group: resource.Group, + Version: resource.Version, + Resource: resource.Resource, + } + + objNamespace := obj.GetNamespace() + if len(strings.TrimSpace(objNamespace)) == 0 { + objNamespace = apiv1.NamespaceDefault + } + + isNamespaced, err := k.isResourceNamespaced(obj.GroupVersionKind()) + if err != nil { + return err + } + + if !isNamespaced { + return k.createDynamicNonNamespaced(gvr, obj) + } + + return k.createDynamicNamespaced(gvr, objNamespace, obj) +} + +func (k *KubernetesClient) createDynamicNamespaced( + gvr schema.GroupVersionResource, + namespace string, + obj *unstructured.Unstructured, +) error { + current, err := k.Dynamic.Resource(gvr).Namespace(namespace).Get(context.TODO(), obj.GetName(), metav1.GetOptions{}) + if err != nil { + if k8serrors.IsNotFound(err) { + _, err := 
k.Dynamic.Resource(gvr).Namespace(namespace).Create( + context.Background(), + obj, + metav1.CreateOptions{}, + ) + + return err + } + return err + } + + if isJob(obj.GroupVersionKind().Group, obj.GroupVersionKind().Version, obj.GroupVersionKind().Kind) { + if err := copyJobSelectors(current, obj); err != nil { + return err + } + } + + if isPersistentVolumeClaims(obj.GroupVersionKind().Group, obj.GroupVersionKind().Version, obj.GroupVersionKind().Kind) { + if err := mergePVCWithCurrent(current, obj); err != nil { + return err + } + } + + obj.SetResourceVersion(current.GetResourceVersion()) + + _, err = k.Dynamic.Resource(gvr).Namespace(namespace).Update( + context.Background(), + obj, + metav1.UpdateOptions{}, + ) + + return err +} + +func (k *KubernetesClient) createDynamicNonNamespaced( + gvr schema.GroupVersionResource, + obj *unstructured.Unstructured, +) error { + current, err := k.Dynamic.Resource(gvr).Get(context.TODO(), obj.GetName(), metav1.GetOptions{}) + if err != nil { + if k8serrors.IsNotFound(err) { + _, err := k.Dynamic.Resource(gvr).Create( + context.Background(), + obj, + metav1.CreateOptions{}, + ) + + return err + } + return err + } + + obj.SetResourceVersion(current.GetResourceVersion()) + + _, err = k.Dynamic.Resource(gvr).Update( + context.Background(), + obj, + metav1.UpdateOptions{}, + ) + + return err +} + +func (k *KubernetesClient) ApplyCRD(obj *unstructured.Unstructured) error { + gvr := schema.GroupVersionResource{ + Group: "apiextensions.k8s.io", + Version: "v1", + Resource: "customresourcedefinitions", + } + + _, err := k.Dynamic.Resource(gvr).Apply( + context.Background(), + obj.GetName(), + obj, + metav1.ApplyOptions{ + FieldManager: "cyclops-ctrl", + }, + ) + + return err +} + +func copyJobSelectors(source, destination *unstructured.Unstructured) error { + selectors, ok, err := unstructured.NestedMap(source.Object, "spec", "selector") + if err != nil { + return err + } + if !ok { + return errors.New(fmt.Sprintf("job %v selectors not found", source.GetName())) + } + + templateLabels, ok, err := unstructured.NestedMap(source.Object, "spec", "template", "metadata", "labels") + if err != nil { + return err + } + if !ok { + return errors.New(fmt.Sprintf("job %v selectors not found", source.GetName())) + } + + if err := unstructured.SetNestedMap(destination.Object, selectors, "spec", "selector"); err != nil { + return err + } + + return unstructured.SetNestedMap(destination.Object, templateLabels, "spec", "template", "metadata", "labels") +} + +func mergePVCWithCurrent(current, obj *unstructured.Unstructured) error { + requests, ok, err := unstructured.NestedMap(obj.Object, "spec", "resources", "requests") + if err != nil { + return err + } + if !ok { + return fmt.Errorf("PVC %v spec.resources.requests not found", obj.GetName()) + } + + for key, value := range current.Object { + obj.Object[key] = value + } + + return unstructured.SetNestedMap(current.Object, requests, "spec", "resources", "requests") +} + +func (k *KubernetesClient) ListNodes() ([]apiv1.Node, error) { + nodeList, err := k.clientset.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + return nodeList.Items, err +} + +func (k *KubernetesClient) GetNode(name string) (*apiv1.Node, error) { + return k.clientset.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{}) +} + +func (k *KubernetesClient) GetPodsForNode(nodeName string) ([]apiv1.Pod, error) { + podList, err := k.clientset.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{ + FieldSelector: "spec.nodeName=" + 
nodeName, + }) + return podList.Items, err +} + +func (k *KubernetesClient) mapDeployment(group, version, kind, name, namespace string) (*dto.Deployment, error) { + deployment, err := k.clientset.AppsV1().Deployments(namespace).Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + pods, err := k.getPods(*deployment) + if err != nil { + return nil, err + } + + return &dto.Deployment{ + Group: group, + Version: version, + Kind: kind, + Name: deployment.Name, + Namespace: deployment.Namespace, + Replicas: int(*deployment.Spec.Replicas), + Pods: pods, + Status: getDeploymentStatus(pods), + }, nil +} + +func (k *KubernetesClient) mapDaemonSet(group, version, kind, name, namespace string) (*dto.DaemonSet, error) { + daemonSet, err := k.clientset.AppsV1().DaemonSets(namespace).Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + pods, err := k.getPodsForDaemonSet(*daemonSet) + if err != nil { + return nil, err + } + + return &dto.DaemonSet{ + Group: group, + Version: version, + Kind: kind, + Name: daemonSet.Name, + Namespace: daemonSet.Namespace, + Pods: pods, + Status: getDaemonSetStatus(pods), + }, nil +} + +func (k *KubernetesClient) mapStatefulSet(group, version, kind, name, namespace string) (*dto.StatefulSet, error) { + statefulset, err := k.clientset.AppsV1().StatefulSets(namespace).Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + pods, err := k.getStatefulsetPods(*statefulset) + if err != nil { + return nil, err + } + + return &dto.StatefulSet{ + Group: group, + Version: version, + Kind: kind, + Name: name, + Namespace: namespace, + Replicas: int(*statefulset.Spec.Replicas), + Pods: pods, + Status: getDeploymentStatus(pods), + }, nil +} + +func (k *KubernetesClient) mapPod(group, version, kind, name, namespace string) (*dto.Pod, error) { + item, err := k.clientset.CoreV1().Pods(namespace).Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + containers := make([]dto.Container, 0, len(item.Spec.Containers)) + + for _, cnt := range item.Spec.Containers { + env := make(map[string]string) + for _, envVar := range cnt.Env { + env[envVar.Name] = envVar.Value + } + + var status apiv1.ContainerStatus + for _, c := range item.Status.ContainerStatuses { + if c.Name == cnt.Name { + status = c + break + } + } + + containers = append(containers, dto.Container{ + Name: cnt.Name, + Image: cnt.Image, + Env: env, + Status: containerStatus(status), + }) + } + + initContainers := make([]dto.Container, 0, len(item.Spec.InitContainers)) + for _, cnt := range item.Spec.InitContainers { + env := make(map[string]string) + for _, envVar := range cnt.Env { + env[envVar.Name] = envVar.Value + } + + var status apiv1.ContainerStatus + for _, c := range item.Status.ContainerStatuses { + if c.Name == cnt.Name { + status = c + break + } + } + + initContainers = append(initContainers, dto.Container{ + Name: cnt.Name, + Image: cnt.Image, + Env: env, + Status: containerStatus(status), + }) + } + + return &dto.Pod{ + Group: group, + Version: version, + Kind: kind, + Name: name, + Namespace: namespace, + Containers: containers, + InitContainers: initContainers, + Node: item.Spec.NodeName, + PodPhase: string(item.Status.Phase), + Status: getPodStatus(containers), + Started: item.Status.StartTime, + Deleted: false, + }, nil +} + +func (k *KubernetesClient) mapService(group, version, kind, name, namespace string) (*dto.Service, error) { + service, err 
:= k.clientset.CoreV1().Services(namespace).Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + return &dto.Service{ + Group: group, + Version: version, + Kind: kind, + Name: name, + Namespace: namespace, + Ports: service.Spec.Ports, + }, nil +} + +func (k *KubernetesClient) mapConfigMap(group, version, kind, name, namespace string) (*dto.ConfigMap, error) { + configmap, err := k.clientset.CoreV1().ConfigMaps(namespace).Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + return &dto.ConfigMap{ + Group: group, + Version: version, + Kind: kind, + Name: name, + Namespace: namespace, + Data: configmap.Data, + }, nil +} + +func (k *KubernetesClient) mapPersistentVolumeClaims(group, version, kind, name, namespace string) (*dto.PersistentVolumeClaim, error) { + persistentvolumeclaim, err := k.clientset.CoreV1().PersistentVolumeClaims(namespace).Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + storage := "" + if persistentvolumeclaim.Spec.Resources.Requests != nil && persistentvolumeclaim.Spec.Resources.Requests.Storage() != nil { + storage = persistentvolumeclaim.Spec.Resources.Requests.Storage().String() + } + + return &dto.PersistentVolumeClaim{ + Group: group, + Version: version, + Kind: kind, + Name: name, + Namespace: namespace, + AccessModes: persistentvolumeclaim.Spec.AccessModes, + Size: storage, + }, nil +} + +func (k *KubernetesClient) mapPersistentVolumes(group, version, kind, name, namespace string) (*dto.PersistentVolume, error) { + persistentVolume, err := k.clientset.CoreV1().PersistentVolumes().Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + capacity := "" + if persistentVolume.Spec.Capacity != nil && persistentVolume.Spec.Capacity.Storage() != nil { + capacity = persistentVolume.Spec.Capacity.Storage().String() + } + + claimRef := "" + if persistentVolume.Spec.ClaimRef != nil && persistentVolume.Spec.ClaimRef.Name != "" { + claimRef = persistentVolume.Spec.ClaimRef.Name + } + + return &dto.PersistentVolume{ + Group: group, + Version: version, + Kind: kind, + Name: name, + Namespace: namespace, + AccessModes: persistentVolume.Spec.AccessModes, + PersistentVolumeClaim: claimRef, + Capacity: capacity, + ReclaimPolicy: persistentVolume.Spec.PersistentVolumeReclaimPolicy, + StorageClass: persistentVolume.Spec.StorageClassName, + Status: persistentVolume.Status, + }, nil +} + +func (k *KubernetesClient) mapSecret(group, version, kind, name, namespace string) (*dto.Secret, error) { + secret, err := k.clientset.CoreV1().Secrets(namespace).Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + dataKeys := make([]string, 0, len(secret.Data)) + for key := range secret.Data { + dataKeys = append(dataKeys, key) + } + + return &dto.Secret{ + Group: group, + Version: version, + Kind: kind, + Name: name, + Namespace: namespace, + DataKeys: dataKeys, + Type: string(secret.Type), + }, nil +} + +func (k *KubernetesClient) mapCronJob(group, version, kind, name, namespace string) (*dto.CronJob, error) { + cronJob, err := k.clientset.BatchV1().CronJobs(namespace).Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + pods, err := k.getPodsForCronJob(*cronJob) + if err != nil { + return nil, err + } + + status := dto.StatusCronJob{ + LastScheduleTime: cronJob.Status.LastScheduleTime, + LastSuccessfulTime: 
cronJob.Status.LastSuccessfulTime, + } + + return &dto.CronJob{ + Group: group, + Version: version, + Kind: kind, + Name: cronJob.Name, + Namespace: cronJob.Namespace, + Schedule: cronJob.Spec.Schedule, + Status: status, + Pods: pods, + }, nil +} + +func (k *KubernetesClient) mapJob(group, version, kind, name, namespace string) (*dto.Job, error) { + job, err := k.clientset.BatchV1().Jobs(namespace).Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + pods, err := k.getPodsForJob(*job) + if err != nil { + return nil, err + } + + startTime := "" + if job.Status.StartTime != nil { + startTime = job.Status.StartTime.String() + } + + completionTime := "" + if job.Status.CompletionTime != nil { + completionTime = job.Status.CompletionTime.String() + } + + return &dto.Job{ + Group: group, + Version: version, + Kind: kind, + Name: job.Name, + Namespace: job.Namespace, + CompletionTime: completionTime, + StartTime: startTime, + Pods: pods, + }, nil +} + +func (k *KubernetesClient) mapNetworkPolicy(group, version, kind, name, namespace string) (*dto.NetworkPolicy, error) { + networkPolicy, err := k.clientset.NetworkingV1().NetworkPolicies(namespace).Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + pods, err := k.getPodsForNetworkPolicy(*networkPolicy) + if err != nil { + return nil, err + } + + mappedPolicy := &dto.NetworkPolicy{ + Group: group, + Version: version, + Kind: kind, + Name: networkPolicy.Name, + Namespace: networkPolicy.Namespace, + Pods: pods, + Ingress: mapNetworkPolicyIngressRules(networkPolicy.Spec.Ingress), + Egress: mapNetworkPolicyEgressRules(networkPolicy.Spec.Egress), + } + + return mappedPolicy, nil +} + +func mapNetworkPolicyIngressRules(rules []networkingv1.NetworkPolicyIngressRule) []dto.NetworkPolicyIngressRule { + mapped := make([]dto.NetworkPolicyIngressRule, len(rules)) + for i, rule := range rules { + mapped[i] = dto.NetworkPolicyIngressRule{ + Ports: mapNetworkPolicyPorts(rule.Ports), + From: mapNetworkPolicyPeers(rule.From), + } + } + return mapped +} + +func mapNetworkPolicyEgressRules(rules []networkingv1.NetworkPolicyEgressRule) []dto.NetworkPolicyEgressRule { + mapped := make([]dto.NetworkPolicyEgressRule, len(rules)) + for i, rule := range rules { + mapped[i] = dto.NetworkPolicyEgressRule{ + Ports: mapNetworkPolicyPorts(rule.Ports), + To: mapNetworkPolicyPeers(rule.To), + } + } + return mapped +} + +func mapNetworkPolicyPorts(ports []networkingv1.NetworkPolicyPort) []dto.NetworkPolicyPort { + mapped := make([]dto.NetworkPolicyPort, len(ports)) + for i, port := range ports { + protocol := "" + if port.Protocol != nil { + protocol = string(*port.Protocol) + } + + portValue := intstr.IntOrString{} + if port.Port != nil { + portValue = *port.Port + } + + var endPort int32 + if port.EndPort != nil { + endPort = *port.EndPort + } + + mapped[i] = dto.NetworkPolicyPort{ + Protocol: protocol, + Port: portValue, + EndPort: endPort, + } + } + return mapped +} + +func mapNetworkPolicyPeers(peers []networkingv1.NetworkPolicyPeer) []dto.NetworkPolicyPeer { + mapped := make([]dto.NetworkPolicyPeer, len(peers)) + for i, peer := range peers { + mapped[i] = dto.NetworkPolicyPeer{ + IPBlock: mapIPBlock(peer.IPBlock), + } + } + return mapped +} + +func mapIPBlock(block *networkingv1.IPBlock) *dto.IPBlock { + if block == nil { + return nil + } + return &dto.IPBlock{ + CIDR: block.CIDR, + Except: block.Except, + } +} + +func (k *KubernetesClient) isResourceNamespaced(gvk schema.GroupVersionKind) 
(bool, error) { + resourcesList, err := k.discovery.ServerPreferredResources() + if err != nil { + return false, err + } + + for _, resource := range resourcesList { + gv, err := schema.ParseGroupVersion(resource.GroupVersion) + if err != nil { + return false, err + } + + for _, apiResource := range resource.APIResources { + if apiResource.Kind == gvk.Kind && + gv.Group == gvk.Group && + gv.Version == gvk.Version { + return apiResource.Namespaced, nil + } + } + } + + return false, errors.New(fmt.Sprintf("group version kind not found: %v", gvk.String())) +} + +func (k *KubernetesClient) mapRole(group, version, kind, name, namespace string) (*dto.Role, error) { + role, err := k.clientset.RbacV1().Roles(namespace).Get(context.Background(), name, metav1.GetOptions{}) + + if err != nil { + return nil, err + } + + return &dto.Role{ + Group: group, + Version: version, + Kind: kind, + Name: role.Name, + Namespace: namespace, + Rules: role.Rules, + }, nil +} + +func isDeployment(group, version, kind string) bool { + return group == "apps" && version == "v1" && kind == "Deployment" +} + +func isDaemonSet(group, version, kind string) bool { + return group == "apps" && version == "v1" && kind == "DaemonSet" +} + +func isStatefulSet(group, version, kind string) bool { + return group == "apps" && version == "v1" && kind == "StatefulSet" +} + +func isPod(group, version, kind string) bool { + return group == "" && version == "v1" && kind == "Pod" +} + +func isJob(group, version, kind string) bool { + return group == "batch" && version == "v1" && kind == "Job" +} + +func isService(group, version, kind string) bool { + return group == "" && version == "v1" && kind == "Service" +} + +func isConfigMap(group, version, kind string) bool { + return group == "" && version == "v1" && kind == "ConfigMap" +} + +func isPersistentVolumeClaims(group, version, kind string) bool { + return group == "" && version == "v1" && kind == "PersistentVolumeClaim" +} + +func isPersistentVolume(group, version, kind string) bool { + return group == "" && version == "v1" && kind == "PersistentVolume" +} + +func isSecret(group, version, kind string) bool { + return group == "" && version == "v1" && kind == "Secret" +} + +func isCronJob(group, version, kind string) bool { + return group == "batch" && version == "v1" && kind == "CronJob" +} + +func isRole(group, version, kind string) bool { + return group == "rbac.authorization.k8s.io" && version == "v1" && kind == "Role" +} + +func isNetworkPolicy(group, version, kind string) bool { + return group == "networking.k8s.io" && version == "v1" && kind == "NetworkPolicy" +} diff --git a/cyclops-ctrl/pkg/cluster/k8sclient/modules.go b/cyclops-ctrl/pkg/cluster/k8sclient/modules.go new file mode 100644 index 000000000..fbe19f1e6 --- /dev/null +++ b/cyclops-ctrl/pkg/cluster/k8sclient/modules.go @@ -0,0 +1,782 @@ +package k8sclient + +import ( + "context" + "sort" + "strings" + + "github.com/pkg/errors" + + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + apiv1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + yaml2 "k8s.io/apimachinery/pkg/util/yaml" + + cyclopsv1alpha1 "github.com/cyclops-ui/cyclops/cyclops-ctrl/api/v1alpha1" + "github.com/cyclops-ui/cyclops/cyclops-ctrl/internal/models/dto" +) + +const ( + statusUndefined = "undefined" + statusHealthy = "healthy" + statusUnhealthy = 
"unhealthy" +) + +func (k *KubernetesClient) ListModules() ([]cyclopsv1alpha1.Module, error) { + moduleList, err := k.moduleset.Modules(cyclopsNamespace).List(metav1.ListOptions{}) + return moduleList, err +} + +func (k *KubernetesClient) CreateModule(module cyclopsv1alpha1.Module) error { + _, err := k.moduleset.Modules(cyclopsNamespace).Create(&module) + return err +} + +func (k *KubernetesClient) UpdateModule(module *cyclopsv1alpha1.Module) error { + _, err := k.moduleset.Modules(cyclopsNamespace).Update(module) + return err +} + +func (k *KubernetesClient) UpdateModuleStatus(module *cyclopsv1alpha1.Module) (*cyclopsv1alpha1.Module, error) { + return k.moduleset.Modules(cyclopsNamespace).PatchStatus(module) +} + +func (k *KubernetesClient) DeleteModule(name string) error { + return k.moduleset.Modules(cyclopsNamespace).Delete(name) +} + +func (k *KubernetesClient) GetModule(name string) (*cyclopsv1alpha1.Module, error) { + return k.moduleset.Modules(cyclopsNamespace).Get(name) +} + +func (k *KubernetesClient) GetResourcesForModule(name string) ([]dto.Resource, error) { + out := make([]dto.Resource, 0, 0) + + managedGVRs, err := k.getManagedGVRs(name) + if err != nil { + return nil, err + } + + other := make([]unstructured.Unstructured, 0) + for _, gvr := range managedGVRs { + rs, err := k.Dynamic.Resource(gvr).List(context.Background(), metav1.ListOptions{ + LabelSelector: "cyclops.module=" + name, + }) + if err != nil { + continue + } + + for _, item := range rs.Items { + other = append(other, item) + } + } + + for _, o := range other { + status, err := k.getResourceStatus(o) + if err != nil { + return nil, err + } + + out = append(out, &dto.Other{ + Group: o.GroupVersionKind().Group, + Version: o.GroupVersionKind().Version, + Kind: o.GroupVersionKind().Kind, + Name: o.GetName(), + Namespace: o.GetNamespace(), + Status: status, + Deleted: false, + }) + } + + sort.Slice(out, func(i, j int) bool { + if out[i].GetGroupVersionKind() != out[j].GetGroupVersionKind() { + return out[i].GetGroupVersionKind() < out[j].GetGroupVersionKind() + } + + return out[i].GetName() < out[j].GetName() + }) + + return out, nil +} + +func (k *KubernetesClient) getManagedGVRs(moduleName string) ([]schema.GroupVersionResource, error) { + module, _ := k.GetModule(moduleName) + + if module != nil && len(module.Status.ManagedGVRs) != 0 { + existing := make([]schema.GroupVersionResource, 0, len(module.Status.ManagedGVRs)) + for _, r := range module.Status.ManagedGVRs { + existing = append(existing, schema.GroupVersionResource{ + Group: r.Group, + Version: r.Version, + Resource: r.Resource, + }) + } + + return existing, nil + } + + apiResources, err := k.clientset.Discovery().ServerPreferredResources() + if err != nil { + return nil, err + } + + gvrs := make([]schema.GroupVersionResource, 0) + for _, resource := range apiResources { + gvk, err := schema.ParseGroupVersion(resource.GroupVersion) + if err != nil { + continue + } + + for _, apiResource := range resource.APIResources { + if gvk.Group == "discovery.k8s.io" && gvk.Version == "v1" && apiResource.Kind == "EndpointSlice" || + gvk.Group == "" && gvk.Version == "v1" && apiResource.Kind == "Endpoints" { + continue + } + + gvrs = append(gvrs, schema.GroupVersionResource{ + Group: gvk.Group, + Version: gvk.Version, + Resource: apiResource.Name, + }) + } + } + + return gvrs, nil +} + +func (k *KubernetesClient) GetDeletedResources( + resources []dto.Resource, + manifest string, +) ([]dto.Resource, error) { + resourcesFromTemplate := make(map[string][]dto.Resource, 
0) + + for _, s := range strings.Split(manifest, "\n---\n") { + s := strings.TrimSpace(s) + if len(s) == 0 { + continue + } + + var obj unstructured.Unstructured + + decoder := yaml2.NewYAMLOrJSONDecoder(strings.NewReader(s), len(s)) + if err := decoder.Decode(&obj); err != nil { + panic(err) + } + + objGVK := obj.GetObjectKind().GroupVersionKind().String() + resourcesFromTemplate[objGVK] = append(resourcesFromTemplate[objGVK], &dto.Service{ + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + }) + } + + out := make([]dto.Resource, 0, len(resources)) + for _, resource := range resources { + gvk := resource.GetGroupVersionKind() + + if _, ok := resourcesFromTemplate[gvk]; !ok { + resource.SetDeleted(true) + out = append(out, resource) + continue + } + + found := false + for _, rs := range resourcesFromTemplate[gvk] { + if resource.GetName() == rs.GetName() && (resource.GetNamespace() == rs.GetNamespace() || rs.GetNamespace() == "") { + found = true + break + } + } + + if found == false { + resource.SetDeleted(true) + } + + out = append(out, resource) + } + + return out, nil +} + +func (k *KubernetesClient) GetModuleResourcesHealth(name string) (string, error) { + resourcesWithHealth := 0 + + deployments, err := k.clientset.AppsV1().Deployments("").List(context.Background(), metav1.ListOptions{ + LabelSelector: "cyclops.module=" + name, + }) + if err != nil { + return statusUndefined, err + } + + resourcesWithHealth += len(deployments.Items) + for _, item := range deployments.Items { + if item.Generation != item.Status.ObservedGeneration || + item.Status.Replicas != item.Status.UpdatedReplicas || + item.Status.UnavailableReplicas != 0 { + return statusUnhealthy, nil + } + } + + statefulsets, err := k.clientset.AppsV1().StatefulSets("").List(context.Background(), metav1.ListOptions{ + LabelSelector: "cyclops.module=" + name, + }) + if err != nil { + return statusUndefined, err + } + + resourcesWithHealth += len(statefulsets.Items) + for _, item := range statefulsets.Items { + if item.Generation != item.Status.ObservedGeneration || + item.Status.Replicas != item.Status.UpdatedReplicas || + item.Status.Replicas != item.Status.AvailableReplicas { + return statusUnhealthy, nil + } + } + + daemonsets, err := k.clientset.AppsV1().DaemonSets("").List(context.Background(), metav1.ListOptions{ + LabelSelector: "cyclops.module=" + name, + }) + if err != nil { + return statusUndefined, err + } + + resourcesWithHealth += len(daemonsets.Items) + for _, item := range daemonsets.Items { + if item.Generation != item.Status.ObservedGeneration || + item.Status.UpdatedNumberScheduled != item.Status.DesiredNumberScheduled || + item.Status.NumberUnavailable != 0 { + return statusUnhealthy, nil + } + } + + pvcs, err := k.clientset.CoreV1().PersistentVolumeClaims("").List(context.Background(), metav1.ListOptions{ + LabelSelector: "cyclops.module=" + name, + }) + if err != nil { + return statusUndefined, err + } + + resourcesWithHealth += len(pvcs.Items) + for _, item := range pvcs.Items { + if item.Status.Phase != apiv1.ClaimBound { + return statusUnhealthy, nil + } + } + + pods, err := k.clientset.CoreV1().Pods("").List(context.Background(), metav1.ListOptions{ + LabelSelector: "cyclops.module=" + name, + }) + if err != nil { + return statusUndefined, err + } + + resourcesWithHealth += len(pods.Items) + for _, item := range pods.Items { + for _, cnt := range item.Spec.Containers { + var status apiv1.ContainerStatus + for _, c := range item.Status.ContainerStatuses { + if c.Name == cnt.Name { + status = c + 
break + } + } + + if !containerStatus(status).Running { + return statusUnhealthy, nil + } + } + } + + if resourcesWithHealth == 0 { + return statusUndefined, nil + } + + return statusHealthy, nil +} + +func (k *KubernetesClient) GVKtoAPIResourceName(gv schema.GroupVersion, kind string) (string, error) { + apiResources, err := k.clientset.Discovery().ServerResourcesForGroupVersion(gv.String()) + if err != nil { + return "", err + } + + for _, resource := range apiResources.APIResources { + if resource.Kind == kind && len(resource.Name) != 0 { + return resource.Name, nil + } + } + + return "", errors.Errorf("could not find api-resource for groupVersion: %v and kind: %v", gv.String(), kind) +} + +func (k *KubernetesClient) getResourceStatus(o unstructured.Unstructured) (string, error) { + if isPod(o.GroupVersionKind().Group, o.GroupVersionKind().Version, o.GetKind()) { + pod, err := k.clientset.CoreV1().Pods(o.GetNamespace()).Get(context.Background(), o.GetName(), metav1.GetOptions{}) + if err != nil { + return statusUndefined, err + } + + for _, cnt := range pod.Spec.Containers { + var status apiv1.ContainerStatus + for _, c := range pod.Status.ContainerStatuses { + if c.Name == cnt.Name { + status = c + break + } + } + + if !containerStatus(status).Running { + return statusUnhealthy, nil + } + } + + return statusHealthy, err + } + + if isDeployment(o.GroupVersionKind().Group, o.GroupVersionKind().Version, o.GetKind()) { + deployment, err := k.clientset.AppsV1().Deployments(o.GetNamespace()).Get(context.Background(), o.GetName(), metav1.GetOptions{}) + if err != nil { + return statusUndefined, err + } + + if deployment.Generation == deployment.Status.ObservedGeneration && + deployment.Status.Replicas == deployment.Status.UpdatedReplicas && + deployment.Status.UnavailableReplicas == 0 { + return statusHealthy, nil + } + + return statusUnhealthy, nil + } + + if isStatefulSet(o.GroupVersionKind().Group, o.GroupVersionKind().Version, o.GetKind()) { + statefulset, err := k.clientset.AppsV1().StatefulSets(o.GetNamespace()).Get(context.Background(), o.GetName(), metav1.GetOptions{}) + if err != nil { + return statusUndefined, err + } + + if statefulset.Generation == statefulset.Status.ObservedGeneration && + statefulset.Status.Replicas == statefulset.Status.UpdatedReplicas && + statefulset.Status.Replicas == statefulset.Status.AvailableReplicas { + return statusHealthy, nil + } + + return statusUnhealthy, nil + } + + if isDaemonSet(o.GroupVersionKind().Group, o.GroupVersionKind().Version, o.GetKind()) { + daemonset, err := k.clientset.AppsV1().DaemonSets(o.GetNamespace()).Get(context.Background(), o.GetName(), metav1.GetOptions{}) + if err != nil { + return statusUndefined, err + } + + if daemonset.Generation == daemonset.Status.ObservedGeneration && + daemonset.Status.UpdatedNumberScheduled == daemonset.Status.DesiredNumberScheduled && + daemonset.Status.NumberUnavailable == 0 { + return statusHealthy, nil + } + + return statusUnhealthy, nil + } + + if isPersistentVolumeClaims(o.GroupVersionKind().Group, o.GroupVersionKind().Version, o.GetKind()) { + pvc, err := k.clientset.CoreV1().PersistentVolumeClaims(o.GetNamespace()).Get(context.Background(), o.GetName(), metav1.GetOptions{}) + if err != nil { + return statusUndefined, err + } + + if pvc.Status.Phase == apiv1.ClaimBound { + return statusHealthy, nil + } + + return statusUnhealthy, nil + } + + return statusUndefined, nil +} + +func (k *KubernetesClient) getPods(deployment appsv1.Deployment) ([]dto.Pod, error) { + pods, err := 
k.clientset.CoreV1().Pods(deployment.Namespace).List(context.Background(), metav1.ListOptions{ + LabelSelector: labels.Set(deployment.Spec.Selector.MatchLabels).String(), + }) + if err != nil { + return nil, err + } + + out := make([]dto.Pod, 0, len(pods.Items)) + for _, item := range pods.Items { + containers := make([]dto.Container, 0, len(item.Spec.Containers)) + + for _, cnt := range item.Spec.Containers { + env := make(map[string]string) + for _, envVar := range cnt.Env { + env[envVar.Name] = envVar.Value + } + + var status apiv1.ContainerStatus + for _, c := range item.Status.ContainerStatuses { + if c.Name == cnt.Name { + status = c + break + } + } + + containers = append(containers, dto.Container{ + Name: cnt.Name, + Image: cnt.Image, + Env: env, + Status: containerStatus(status), + }) + } + + out = append(out, dto.Pod{ + Name: item.Name, + Containers: containers, + Node: item.Spec.NodeName, + PodPhase: string(item.Status.Phase), + Started: item.Status.StartTime, + }) + } + + return out, nil +} + +func (k *KubernetesClient) getPodsForDaemonSet(daemonSet appsv1.DaemonSet) ([]dto.Pod, error) { + pods, err := k.clientset.CoreV1().Pods(daemonSet.Namespace).List(context.Background(), metav1.ListOptions{ + LabelSelector: labels.Set(daemonSet.Spec.Selector.MatchLabels).String(), + }) + if err != nil { + return nil, err + } + + out := make([]dto.Pod, 0, len(pods.Items)) + for _, item := range pods.Items { + containers := make([]dto.Container, 0, len(item.Spec.Containers)) + + for _, cnt := range item.Spec.Containers { + env := make(map[string]string) + for _, envVar := range cnt.Env { + env[envVar.Name] = envVar.Value + } + + var status apiv1.ContainerStatus + for _, c := range item.Status.ContainerStatuses { + if c.Name == cnt.Name { + status = c + break + } + } + + containers = append(containers, dto.Container{ + Name: cnt.Name, + Image: cnt.Image, + Env: env, + Status: containerStatus(status), + }) + } + + out = append(out, dto.Pod{ + Name: item.Name, + Containers: containers, + Node: item.Spec.NodeName, + PodPhase: string(item.Status.Phase), + Started: item.Status.StartTime, + }) + } + + return out, nil +} + +func (k *KubernetesClient) getStatefulsetPods(deployment appsv1.StatefulSet) ([]dto.Pod, error) { + pods, err := k.clientset.CoreV1().Pods(deployment.Namespace).List(context.Background(), metav1.ListOptions{ + LabelSelector: labels.Set(deployment.Spec.Selector.MatchLabels).String(), + }) + if err != nil { + return nil, err + } + + out := make([]dto.Pod, 0, len(pods.Items)) + for _, item := range pods.Items { + containers := make([]dto.Container, 0, len(item.Spec.Containers)) + + for _, cnt := range item.Spec.Containers { + env := make(map[string]string) + for _, envVar := range cnt.Env { + env[envVar.Name] = envVar.Value + } + + var status apiv1.ContainerStatus + for _, c := range item.Status.ContainerStatuses { + if c.Name == cnt.Name { + status = c + break + } + } + + containers = append(containers, dto.Container{ + Name: cnt.Name, + Image: cnt.Image, + Env: env, + Status: containerStatus(status), + }) + } + + initContainers := make([]dto.Container, 0, len(item.Spec.Containers)) + for _, cnt := range item.Spec.InitContainers { + env := make(map[string]string) + for _, envVar := range cnt.Env { + env[envVar.Name] = envVar.Value + } + + var status apiv1.ContainerStatus + for _, c := range item.Status.ContainerStatuses { + if c.Name == cnt.Name { + status = c + break + } + } + + initContainers = append(initContainers, dto.Container{ + Name: cnt.Name, + Image: cnt.Image, + Env: env, 
+ Status: containerStatus(status), + }) + } + + out = append(out, dto.Pod{ + Name: item.Name, + Containers: containers, + InitContainers: initContainers, + Node: item.Spec.NodeName, + PodPhase: string(item.Status.Phase), + Started: item.Status.StartTime, + }) + } + + return out, nil +} + +func (k *KubernetesClient) getPodsForCronJob(cronJob batchv1.CronJob) ([]dto.Pod, error) { + jobTemplateLabels := cronJob.Spec.JobTemplate.Spec.Template.Labels + jobLabelSelector := labels.SelectorFromSet(jobTemplateLabels).String() + + jobs, err := k.clientset.BatchV1().Jobs(cronJob.Namespace).List(context.Background(), metav1.ListOptions{ + LabelSelector: jobLabelSelector, + }) + if err != nil { + return nil, err + } + + out := make([]dto.Pod, 0) + + for _, job := range jobs.Items { + podTemplateLabels := job.Spec.Template.Labels + podLabelSelector := labels.SelectorFromSet(podTemplateLabels).String() + + pods, err := k.clientset.CoreV1().Pods(cronJob.Namespace).List(context.Background(), metav1.ListOptions{ + LabelSelector: podLabelSelector, + }) + + if err != nil { + return nil, err + } + + for _, item := range pods.Items { + containers := make([]dto.Container, 0, len(item.Spec.Containers)) + + for _, cnt := range item.Spec.Containers { + env := make(map[string]string) + for _, envVar := range cnt.Env { + env[envVar.Name] = envVar.Value + } + + var status apiv1.ContainerStatus + for _, c := range item.Status.ContainerStatuses { + if c.Name == cnt.Name { + status = c + break + } + } + + containers = append(containers, dto.Container{ + Name: cnt.Name, + Image: cnt.Image, + Env: env, + Status: containerStatus(status), + }) + } + + out = append(out, dto.Pod{ + Name: item.Name, + Containers: containers, + Node: item.Spec.NodeName, + PodPhase: string(item.Status.Phase), + Started: item.Status.StartTime, + }) + } + } + + return out, nil + +} + +func (k *KubernetesClient) getPodsForJob(job batchv1.Job) ([]dto.Pod, error) { + pods, err := k.clientset.CoreV1().Pods(job.Namespace).List(context.Background(), metav1.ListOptions{ + LabelSelector: labels.Set(job.Spec.Selector.MatchLabels).String(), + }) + + if err != nil { + return nil, err + } + + out := make([]dto.Pod, 0, len(pods.Items)) + for _, item := range pods.Items { + containers := make([]dto.Container, 0, len(item.Spec.Containers)) + + for _, cnt := range item.Spec.Containers { + env := make(map[string]string) + for _, envVar := range cnt.Env { + env[envVar.Name] = envVar.Value + } + + var status apiv1.ContainerStatus + for _, c := range item.Status.ContainerStatuses { + if c.Name == cnt.Name { + status = c + break + } + } + + containers = append(containers, dto.Container{ + Name: cnt.Name, + Image: cnt.Image, + Env: env, + Status: containerStatus(status), + }) + } + + out = append(out, dto.Pod{ + Name: item.Name, + Containers: containers, + Node: item.Spec.NodeName, + PodPhase: string(item.Status.Phase), + Started: item.Status.StartTime, + }) + } + + return out, nil + +} + +func (k *KubernetesClient) getPodsForNetworkPolicy(policy networkingv1.NetworkPolicy) ([]dto.Pod, error) { + pods, err := k.clientset.CoreV1().Pods(policy.Namespace).List(context.Background(), metav1.ListOptions{ + LabelSelector: labels.Set(policy.Spec.PodSelector.MatchLabels).String(), + }) + + if err != nil { + return nil, err + } + + out := make([]dto.Pod, 0, len(pods.Items)) + for _, item := range pods.Items { + containers := make([]dto.Container, 0, len(item.Spec.Containers)) + + for _, cnt := range item.Spec.Containers { + env := make(map[string]string) + for _, envVar := range 
cnt.Env { + env[envVar.Name] = envVar.Value + } + + var status apiv1.ContainerStatus + for _, c := range item.Status.ContainerStatuses { + if c.Name == cnt.Name { + status = c + break + } + } + + containers = append(containers, dto.Container{ + Name: cnt.Name, + Image: cnt.Image, + Env: env, + Status: containerStatus(status), + }) + } + + out = append(out, dto.Pod{ + Name: item.Name, + Containers: containers, + Node: item.Spec.NodeName, + PodPhase: string(item.Status.Phase), + Started: item.Status.StartTime, + }) + } + + return out, nil +} + +func containerStatus(status apiv1.ContainerStatus) dto.ContainerStatus { + if status.State.Waiting != nil { + return dto.ContainerStatus{ + Status: status.State.Waiting.Reason, + Message: status.State.Waiting.Message, + Running: false, + } + } + + if status.State.Terminated != nil { + return dto.ContainerStatus{ + Status: status.State.Terminated.Reason, + Message: status.State.Terminated.Message, + Running: status.State.Terminated.ExitCode == 0, + } + } + + return dto.ContainerStatus{ + Status: "running", + Running: true, + } +} + +func getDeploymentStatus(pods []dto.Pod) bool { + for _, pod := range pods { + for _, container := range pod.Containers { + if !container.Status.Running { + return false + } + } + } + + return true +} + +func getDaemonSetStatus(pods []dto.Pod) bool { + if len(pods) == 0 { + return false + } + + for _, pod := range pods { + for _, container := range pod.Containers { + if !container.Status.Running { + return false + } + } + } + + return true +} + +func getPodStatus(containers []dto.Container) bool { + for _, container := range containers { + if !container.Status.Running { + return false + } + } + + return true +} diff --git a/cyclops-ctrl/pkg/cluster/k8sclient/templateauthrules.go b/cyclops-ctrl/pkg/cluster/k8sclient/templateauthrules.go new file mode 100644 index 000000000..6b00d084a --- /dev/null +++ b/cyclops-ctrl/pkg/cluster/k8sclient/templateauthrules.go @@ -0,0 +1,28 @@ +package k8sclient + +import ( + "context" + + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + cyclopsv1alpha1 "github.com/cyclops-ui/cyclops/cyclops-ctrl/api/v1alpha1" +) + +func (k *KubernetesClient) ListTemplateAuthRules() ([]cyclopsv1alpha1.TemplateAuthRule, error) { + return k.moduleset.TemplateAuthRules(cyclopsNamespace).List(metav1.ListOptions{}) +} + +func (k *KubernetesClient) GetTemplateAuthRuleSecret(name, key string) (string, error) { + secret, err := k.clientset.CoreV1().Secrets(cyclopsNamespace).Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + return "", err + } + + secretValue, ok := secret.Data[key] + if !ok { + return "", errors.New("key not found") + } + + return string(secretValue), err +} diff --git a/cyclops-ctrl/pkg/cluster/k8sclient/templatestore.go b/cyclops-ctrl/pkg/cluster/k8sclient/templatestore.go new file mode 100644 index 000000000..5cb152748 --- /dev/null +++ b/cyclops-ctrl/pkg/cluster/k8sclient/templatestore.go @@ -0,0 +1,32 @@ +package k8sclient + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + cyclopsv1alpha1 "github.com/cyclops-ui/cyclops/cyclops-ctrl/api/v1alpha1" +) + +func (k *KubernetesClient) ListTemplateStore() ([]cyclopsv1alpha1.TemplateStore, error) { + return k.moduleset.TemplateStore(cyclopsNamespace).List(metav1.ListOptions{}) +} + +func (k *KubernetesClient) CreateTemplateStore(ts *cyclopsv1alpha1.TemplateStore) error { + _, err := k.moduleset.TemplateStore(cyclopsNamespace).Create(ts) + return err +} + +func (k *KubernetesClient) 
UpdateTemplateStore(ts *cyclopsv1alpha1.TemplateStore) error { + curr, err := k.moduleset.TemplateStore(cyclopsNamespace).Get(ts.Name) + if err != nil { + return err + } + + ts.SetResourceVersion(curr.GetResourceVersion()) + + _, err = k.moduleset.TemplateStore(cyclopsNamespace).Update(ts) + return err +} + +func (k *KubernetesClient) DeleteTemplateStore(name string) error { + return k.moduleset.TemplateStore(cyclopsNamespace).Delete(name) +} diff --git a/cyctl/go.mod b/cyctl/go.mod index 93be3785a..bdb40a884 100644 --- a/cyctl/go.mod +++ b/cyctl/go.mod @@ -7,23 +7,29 @@ toolchain go1.22.2 require ( github.com/cyclops-ui/cyclops/cyclops-ctrl v0.0.0-20240421163218-f48a78b7c0e7 github.com/spf13/cobra v1.8.0 - k8s.io/api v0.30.0 - k8s.io/apiextensions-apiserver v0.29.0 - k8s.io/apimachinery v0.30.0 - k8s.io/client-go v0.30.0 - sigs.k8s.io/yaml v1.3.0 + k8s.io/api v0.30.1 + k8s.io/apiextensions-apiserver v0.30.1 + k8s.io/apimachinery v0.30.1 + k8s.io/client-go v0.30.1 + sigs.k8s.io/yaml v1.4.0 ) require ( + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect @@ -32,18 +38,26 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.7 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/moby/spdystream v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/prometheus/client_golang v1.16.0 // indirect + github.com/prometheus/client_model v0.4.0 // indirect + github.com/prometheus/common v0.44.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect github.com/spf13/pflag v1.0.5 // indirect + golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect golang.org/x/net v0.23.0 // indirect - golang.org/x/oauth2 v0.10.0 // indirect + golang.org/x/oauth2 v0.12.0 // indirect golang.org/x/sys v0.18.0 // indirect golang.org/x/term v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.3.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect @@ -52,7 +66,7 @@ require ( k8s.io/klog/v2 v2.120.1 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect - sigs.k8s.io/controller-runtime v0.15.0 // indirect + 
sigs.k8s.io/controller-runtime v0.18.4 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/cyctl/go.sum b/cyctl/go.sum index f3e276a86..38c6dc54b 100644 --- a/cyctl/go.sum +++ b/cyctl/go.sum @@ -1,5 +1,9 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyclops-ui/cyclops/cyclops-ctrl v0.0.0-20240421163218-f48a78b7c0e7 h1:Fbsdk4Z2x4q+ADgQon+EHKxhGRo1G0ai2+noJGi4sLY= @@ -11,8 +15,14 @@ github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxER github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= @@ -23,6 +33,9 @@ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEe github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= @@ -60,6 +73,8 @@ 
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -71,14 +86,22 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= -github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= -github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE= -github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk= +github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= +github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= +github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= +github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -97,9 +120,17 @@ github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcU github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/yuin/goldmark v1.1.27/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -109,8 +140,9 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= -golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= +golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= +golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -138,6 +170,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= @@ -154,25 +188,25 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.30.0 h1:siWhRq7cNjy2iHssOB9SCGNCl2spiF1dO3dABqZ8niA= -k8s.io/api v0.30.0/go.mod h1:OPlaYhoHs8EQ1ql0R/TsUgaRPhpKNxIMrKQfWUp8QSE= -k8s.io/apiextensions-apiserver v0.29.0 h1:0VuspFG7Hj+SxyF/Z/2T0uFbI5gb5LRgEyUVE3Q4lV0= -k8s.io/apiextensions-apiserver v0.29.0/go.mod h1:TKmpy3bTS0mr9pylH0nOt/QzQRrW7/h7yLdRForMZwc= -k8s.io/apimachinery v0.30.0 h1:qxVPsyDM5XS96NIh9Oj6LavoVFYff/Pon9cZeDIkHHA= -k8s.io/apimachinery v0.30.0/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= -k8s.io/client-go v0.30.0 h1:sB1AGGlhY/o7KCyCEQ0bPWzYDL0pwOZO4vAtTSh/gJQ= -k8s.io/client-go v0.30.0/go.mod h1:g7li5O5256qe6TYdAMyX/otJqMhIiGgTapdLchhmOaY= +k8s.io/api v0.30.1 h1:kCm/6mADMdbAxmIh0LBjS54nQBE+U4KmbCfIkF5CpJY= +k8s.io/api v0.30.1/go.mod h1:ddbN2C0+0DIiPntan/bye3SW3PdwLa11/0yqwvuRrJM= +k8s.io/apiextensions-apiserver v0.30.1 h1:4fAJZ9985BmpJG6PkoxVRpXv9vmPUOVzl614xarePws= +k8s.io/apiextensions-apiserver v0.30.1/go.mod h1:R4GuSrlhgq43oRY9sF2IToFh7PVlF1JjfWdoG3pixk4= +k8s.io/apimachinery v0.30.1 h1:ZQStsEfo4n65yAdlGTfP/uSHMQSoYzU/oeEbkmF7P2U= +k8s.io/apimachinery v0.30.1/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q= +k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc= k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.15.0 h1:ML+5Adt3qZnMSYxZ7gAverBLNPSMQEibtzAgp0UPojU= -sigs.k8s.io/controller-runtime v0.15.0/go.mod h1:7ngYvp1MLT+9GeZ+6lH3LOlcHkp/+tzA/fmHa4iq9kk= +sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw= +sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
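
Illustration: because k8sclient now lives under cyclops-ctrl/pkg/cluster/k8sclient rather than an internal/ path, it can be imported by code outside the cyclops-ctrl module (for example by cyctl, whose go.mod is bumped in this patch). Below is a minimal, hypothetical downstream sketch that calls the ListTemplateStore helper added above. It assumes the caller already holds a *k8sclient.KubernetesClient (client construction and kubeconfig/in-cluster wiring are not part of this diff) and that TemplateStore embeds the usual metav1.ObjectMeta, so .Name is available — consistent with UpdateTemplateStore using Get/SetResourceVersion on it.

// Package example is a hypothetical downstream consumer; it is not part of this patch.
package example

import (
	"fmt"

	"github.com/cyclops-ui/cyclops/cyclops-ctrl/pkg/cluster/k8sclient"
)

// PrintTemplateStores lists TemplateStore resources through the relocated
// public package and prints their names.
func PrintTemplateStores(k *k8sclient.KubernetesClient) error {
	// ListTemplateStore is defined in pkg/cluster/k8sclient/templatestore.go
	// and returns the TemplateStore custom resources in the cyclops namespace.
	stores, err := k.ListTemplateStore()
	if err != nil {
		return err
	}

	for _, ts := range stores {
		// Name is assumed to come from the CRD type's embedded ObjectMeta.
		fmt.Printf("template store: %s\n", ts.Name)
	}

	return nil
}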