diff --git a/pkg/KubeArmorOperator/cmd/operator/main.go b/pkg/KubeArmorOperator/cmd/operator/main.go
index 95f87fda88..8505573da8 100644
--- a/pkg/KubeArmorOperator/cmd/operator/main.go
+++ b/pkg/KubeArmorOperator/cmd/operator/main.go
@@ -30,6 +30,7 @@ var ExtClient *apiextensionsclientset.Clientset
 var Opv1Client *opv1client.Clientset
 var InitDeploy bool
 var LogLevel string
+var ProviderHostname, ProviderEndpoint string
 
 // Cmd represents the base command when called without any subcommands
 var Cmd = &cobra.Command{
@@ -52,7 +53,7 @@ var Cmd = &cobra.Command{
 		return nil
 	},
 	Run: func(cmd *cobra.Command, args []string) {
-		nodeWatcher := controllers.NewClusterWatcher(K8sClient, Logger, ExtClient, Opv1Client, PathPrefix, DeploymentName, InitDeploy)
+		nodeWatcher := controllers.NewClusterWatcher(K8sClient, Logger, ExtClient, Opv1Client, PathPrefix, DeploymentName, ProviderHostname, ProviderEndpoint, InitDeploy)
 
 		go nodeWatcher.WatchConfigCrd()
 		nodeWatcher.WatchNodes()
@@ -78,6 +79,8 @@ func init() {
 	Cmd.PersistentFlags().StringVar(&LsmOrder, "lsm", "bpf,apparmor,selinux", "lsm preference order to use")
 	Cmd.PersistentFlags().StringVar(&PathPrefix, "pathprefix", "/rootfs/", "path prefix for runtime search")
 	Cmd.PersistentFlags().StringVar(&DeploymentName, "deploymentName", "kubearmor-operator", "operator deployment name")
+	Cmd.PersistentFlags().StringVar(&ProviderHostname, "providerHostname", "", "IMDS URL hostname for retrieving cluster name")
+	Cmd.PersistentFlags().StringVar(&ProviderEndpoint, "providerEndpoint", "", "IMDS URL endpoint for retrieving cluster name")
 	// TODO:- set initDeploy to false by default once this change is added to stable
 	Cmd.PersistentFlags().BoolVar(&InitDeploy, "initDeploy", true, "Init container deployment")
 	Cmd.PersistentFlags().StringVar(&LogLevel, "loglevel", "info", "log level, e.g., debug, info, warn, error")
diff --git a/pkg/KubeArmorOperator/internal/controller/cluster.go b/pkg/KubeArmorOperator/internal/controller/cluster.go
index fd1e5af0af..11787b65fa 100644
--- a/pkg/KubeArmorOperator/internal/controller/cluster.go
+++ b/pkg/KubeArmorOperator/internal/controller/cluster.go
@@ -38,6 +38,7 @@ var deployment_uuid types.UID
 var deployment_name string = "kubearmor-operator"
 var PathPrefix string
 var initDeploy bool
+var ProviderHostname, ProviderEndpoint string
 
 type ClusterWatcher struct {
 	Nodes []Node
@@ -60,7 +61,7 @@ type Node struct {
 	Seccomp string
 }
 
-func NewClusterWatcher(client *kubernetes.Clientset, log *zap.SugaredLogger, extClient *apiextensionsclientset.Clientset, opv1Client *opv1client.Clientset, pathPrefix, deploy_name string, initdeploy bool) *ClusterWatcher {
+func NewClusterWatcher(client *kubernetes.Clientset, log *zap.SugaredLogger, extClient *apiextensionsclientset.Clientset, opv1Client *opv1client.Clientset, pathPrefix, deploy_name, providerHostname, providerEndpoint string, initdeploy bool) *ClusterWatcher {
 	if informer == nil {
 		informer = informers.NewSharedInformerFactory(client, 0)
 	}
@@ -77,6 +78,9 @@ func NewClusterWatcher(client *kubernetes.Clientset, log *zap.SugaredLogger, ext
 	PathPrefix = pathPrefix
 	deployment_name = deploy_name
 	initDeploy = initdeploy
+	ProviderHostname = providerHostname
+	ProviderEndpoint = providerEndpoint
+
 	return &ClusterWatcher{
 		Nodes: []Node{},
 		Daemonsets: make(map[string]int),
diff --git a/pkg/KubeArmorOperator/internal/controller/resources.go b/pkg/KubeArmorOperator/internal/controller/resources.go
index e848be8333..67e08612da 100644
--- a/pkg/KubeArmorOperator/internal/controller/resources.go
+++ b/pkg/KubeArmorOperator/internal/controller/resources.go
@@ -7,6 +7,9 @@ import (
 	"bytes"
 	"context"
 	"fmt"
+	"io"
+	"net/http"
+	"regexp"
 	"strings"
 	"time"
 
@@ -469,6 +472,160 @@ func (clusterWatcher *ClusterWatcher) deployControllerDeployment(deployment *app
 	return nil
 }
 
+// getProvider inspects node labels to detect the managed Kubernetes provider
+// and fills in default IMDS hostname/endpoint values for any that were not
+// supplied via the --providerHostname / --providerEndpoint flags.
+func (clusterWatcher *ClusterWatcher) getProvider(providerHostname, providerEndpoint string) (string, string, string) {
+	nodes, err := clusterWatcher.Client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
+	if err != nil {
+		clusterWatcher.Log.Warnf("Error listing nodes: %s", err.Error())
+		return "default", "", ""
+	}
+
+	for _, node := range nodes.Items {
+		for key, label := range node.Labels {
+			if strings.Contains(key, "gke") || strings.Contains(label, "gke") {
+				if providerHostname == "" {
+					providerHostname = "http://metadata.google.internal"
+				}
+				if providerEndpoint == "" {
+					providerEndpoint = "/computeMetadata/v1/instance/attributes/cluster-name"
+				}
+				return "gke", providerHostname, providerEndpoint
+			} else if strings.Contains(key, "eks") || strings.Contains(label, "eks") {
+				if providerHostname == "" {
+					providerHostname = "http://169.254.169.254"
+				}
+				if providerEndpoint == "" {
+					providerEndpoint = "/latest/user-data"
+				}
+				return "eks", providerHostname, providerEndpoint
+			}
+		}
+	}
+	return "default", "", ""
+}
+
+// fetchClusterNameFromGoogle queries the GCE metadata server for the
+// cluster-name instance attribute.
+func (clusterWatcher *ClusterWatcher) fetchClusterNameFromGoogle(providerHostname, providerEndpoint string) (string, error) {
+	url := providerHostname + providerEndpoint
+	req, err := http.NewRequest("GET", url, nil)
+	if err != nil {
+		clusterWatcher.Log.Warnf("failed to create request: %v, check provider host name and endpoint", err)
+		return "", err
+	}
+
+	// Set the header required by the GCE metadata server
+	req.Header.Set("Metadata-Flavor", "Google")
+
+	// Create an HTTP client and make the request
+	client := &http.Client{}
+	resp, err := client.Do(req)
+	if err != nil {
+		clusterWatcher.Log.Warnf("error making request: %v, check provider host name and endpoint", err)
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	// Check for a successful response
+	if resp.StatusCode != http.StatusOK {
+		err = fmt.Errorf("failed to fetch cluster name from metadata server, status code: %d", resp.StatusCode)
+		clusterWatcher.Log.Warnf("%v", err)
+		return "", err
+	}
+
+	// Read the response body
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		clusterWatcher.Log.Warnf("error reading response body: %v", err)
+		return "", err
+	}
+
+	return string(body), nil
+}
+
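+// fetchClusterNameFromAWS reads the node user data exposed by the EC2
+// instance metadata service (IMDS) and extracts the EKS cluster name from
+// the /etc/eks/bootstrap.sh invocation. It obtains an IMDSv2 session token
+// first and sends it with the user-data request.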
+func (clusterWatcher *ClusterWatcher) fetchClusterNameFromAWS(providerHostname, providerEndpoint string) (string, error) {
+	var token []byte
+	client := &http.Client{Timeout: 2 * time.Second}
+	req, err := http.NewRequest("PUT", providerHostname+"/latest/api/token", nil)
+	if err != nil {
+		clusterWatcher.Log.Warnf("failed to create request for fetching token: %v, check provider host name", err)
+		return "", err
+	}
+	req.Header.Set("X-aws-ec2-metadata-token-ttl-seconds", "21600")
+
+	resp, err := client.Do(req)
+	if err != nil {
+		clusterWatcher.Log.Warnf("error making request: %v", err)
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode == http.StatusOK {
+		token, err = io.ReadAll(resp.Body)
+		if err != nil {
+			clusterWatcher.Log.Warnf("failed to read token: %v", err)
+			return "", err
+		}
+	}
+
+	// Fetch the EKS cluster name from user data
+	url := providerHostname + providerEndpoint
+	req, err = http.NewRequest("GET", url, nil)
+	if err != nil {
+		clusterWatcher.Log.Warnf("failed to create request for fetching metadata: %v, check provider host name and endpoint", err)
+		return "", err
+	}
+	req.Header.Set("X-aws-ec2-metadata-token", string(token))
+
+	resp, err = client.Do(req)
+	if err != nil {
+		clusterWatcher.Log.Warnf("error making request: %v", err)
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		err = fmt.Errorf("failed to fetch user data from metadata server, status code: %d", resp.StatusCode)
+		clusterWatcher.Log.Warnf("%v", err)
+		return "", err
+	}
+
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		clusterWatcher.Log.Warnf("failed to read metadata: %v", err)
+		return "", err
+	}
+
+	// Extract the EKS cluster name from the bootstrap invocation
+	re := regexp.MustCompile(`/etc/eks/bootstrap\.sh (\S+)`)
+	match := re.FindStringSubmatch(string(body))
+	if len(match) > 0 {
+		return match[1], nil
+	}
+
+	return "", fmt.Errorf("cluster name not found in user data")
+}
+
+// GetClusterName discovers the managed-cluster name through the provider's
+// metadata service and falls back to "default" when it cannot.
+func (clusterWatcher *ClusterWatcher) GetClusterName(providerHostname, providerEndpoint string) string {
+	provider, pHostname, pEndpoint := clusterWatcher.getProvider(providerHostname, providerEndpoint)
+	if provider == "gke" {
+		clusterWatcher.Log.Infof("Provider is GKE")
+		if clusterName, err := clusterWatcher.fetchClusterNameFromGoogle(pHostname, pEndpoint); err != nil {
+			clusterWatcher.Log.Warnf("Cannot fetch cluster name for GKE: %s", err.Error())
+		} else {
+			return clusterName
+		}
+	} else if provider == "eks" {
+		clusterWatcher.Log.Infof("Provider is EKS")
+		if clusterName, err := clusterWatcher.fetchClusterNameFromAWS(pHostname, pEndpoint); err != nil {
+			clusterWatcher.Log.Warnf("Cannot fetch cluster name for EKS: %s", err.Error())
+		} else {
+			return clusterName
+		}
+	}
+
+	return "default"
+}
+
 func (clusterWatcher *ClusterWatcher) WatchRequiredResources() {
 	var caCert, tlsCrt, tlsKey *bytes.Buffer
 	var kGenErr, err, installErr error
@@ -564,6 +721,7 @@ func (clusterWatcher *ClusterWatcher) WatchRequiredResources() {
 	// kubearmor configmap
 	configmap := addOwnership(deployments.GetKubearmorConfigMap(common.Namespace, deployments.KubeArmorConfigMapName)).(*corev1.ConfigMap)
 	configmap.Data = common.ConfigMapData
+	configmap.Data["cluster"] = clusterWatcher.GetClusterName(ProviderHostname, ProviderEndpoint)
 
 	for {
 		caCert, tlsCrt, tlsKey, kGenErr = common.GeneratePki(common.Namespace, deployments.KubeArmorControllerWebhookServiceName)
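Below is a minimal, standalone sketch (not part of the patch above) showing the same user-data extraction that fetchClusterNameFromAWS performs; the sample bootstrap line and cluster name are invented for illustration.

package main

import (
	"fmt"
	"regexp"
)

// Invented user-data snippet of the kind EKS worker nodes typically receive;
// in the operator this content comes from the IMDS /latest/user-data endpoint.
const sampleUserData = `#!/bin/bash
set -o xtrace
/etc/eks/bootstrap.sh demo-cluster --kubelet-extra-args '--node-labels=role=worker'`

func main() {
	// Same pattern as in resources.go: the first token after the bootstrap
	// script path is the EKS cluster name.
	re := regexp.MustCompile(`/etc/eks/bootstrap\.sh (\S+)`)
	if match := re.FindStringSubmatch(sampleUserData); len(match) > 1 {
		fmt.Println(match[1]) // prints "demo-cluster"
	}
}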