diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml
index a99b0c516a..d5bba49517 100644
--- a/.github/workflows/build.yaml
+++ b/.github/workflows/build.yaml
@@ -87,7 +87,7 @@ jobs:
         run: echo '::set-output name=IMAGE_NAME::quay.io/${{ github.repository_owner }}/kubescape'
 
       - name: Build the Docker image
-        run: docker build . --file build/Dockerfile --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg run_number=${{ github.run_number }}
+        run: docker build . --file build/Dockerfile --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg image_version=${{ steps.image-version.outputs.IMAGE_VERSION }}
 
       - name: Re-Tag Image to latest
         run: docker tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} ${{ steps.image-name.outputs.IMAGE_NAME }}:latest
diff --git a/.github/workflows/build_dev.yaml b/.github/workflows/build_dev.yaml
index e2840b0118..3facff1ea5 100644
--- a/.github/workflows/build_dev.yaml
+++ b/.github/workflows/build_dev.yaml
@@ -65,7 +65,7 @@ jobs:
         run: echo '::set-output name=IMAGE_NAME::quay.io/${{ github.repository_owner }}/kubescape'
 
       - name: Build the Docker image
-        run: docker build . --file build/Dockerfile --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg run_number=${{ github.run_number }}
+        run: docker build . --file build/Dockerfile --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg image_version=${{ steps.image-version.outputs.IMAGE_VERSION }}
 
       - name: Login to Quay.io
         env:
diff --git a/README.md b/README.md
index 78664813dd..ebaa5320e3 100644
--- a/README.md
+++ b/README.md
@@ -195,38 +195,40 @@ helm template bitnami/mysql --generate-name --dry-run | kubescape scan -
 ```
-### Offline Support
+### Offline/Air-gapped Environment Support
 
 [Video tutorial](https://youtu.be/IGXL9s37smM)
 
 It is possible to run Kubescape offline!
+#### Download all artifacts
 
-First download the framework and then scan with `--use-from` flag
-
-1. Download and save in file, if file name not specified, will save in `~/.kubescape/.json`
+1. Download and save the artifacts in a local directory. If no path is specified, they will be saved in `~/.kubescape`
 ```
-kubescape download framework nsa --output nsa.json
+kubescape download artifacts --output path/to/local/dir
 ```
+2. Copy the downloaded artifacts to the air-gapped/offline environment
 
-2. Scan using the downloaded framework
+3. Scan using the downloaded artifacts
 ```
-kubescape scan framework nsa --use-from nsa.json
+kubescape scan --use-artifacts-from path/to/local/dir
 ```
+#### Download a single artifact
+You can also download a single artifact and scan with the `--use-from` flag
 
-You can also download all artifacts to a local path and then load them using `--use-artifacts-from` flag
-
-1. Download and save in local directory, if path not specified, will save all in `~/.kubescape`
+1. Download and save to a file. If no file name is specified, it will be saved in `~/.kubescape/.json`
 ```
-kubescape download artifacts --output path/to/local/dir
+kubescape download framework nsa --output /path/nsa.json
 ```
+2. Copy the downloaded artifacts to the air-gapped/offline environment
 
-2. Scan using the downloaded artifacts
+3. Scan using the downloaded framework
 ```
-kubescape scan framework nsa --use-artifacts-from path/to/local/dir
+kubescape scan framework nsa --use-from /path/nsa.json
 ```
+
 ## Scan Periodically using Helm - Contributed by [@yonahd](https://github.com/yonahd)
 [Please follow the instructions here](https://hub.armo.cloud/docs/installation-of-armo-in-cluster)
 [helm chart repo](https://github.com/armosec/armo-helm)
diff --git a/build/Dockerfile b/build/Dockerfile
index 55e168c582..2047431093 100644
--- a/build/Dockerfile
+++ b/build/Dockerfile
@@ -1,9 +1,9 @@
 FROM golang:1.17-alpine as builder
 
 #ENV GOPROXY=https://goproxy.io,direct
 
-ARG run_number
+ARG image_version
 
-ENV RELEASE=v1.0.${run_number}
+ENV RELEASE=${image_version}
 
 ENV GO111MODULE=
diff --git a/cautils/logger/prettylogger/logger.go b/cautils/logger/prettylogger/logger.go
index 4ae10bf7ea..795ff4ac75 100644
--- a/cautils/logger/prettylogger/logger.go
+++ b/cautils/logger/prettylogger/logger.go
@@ -57,7 +57,10 @@ func (pl *PrettyLogger) print(level helpers.Level, msg string, details ...helper
 	if !level.Skip(pl.level) {
 		pl.mutex.Lock()
 		prefix(level)(pl.writer, "[%s] ", level.String())
-		message(pl.writer, fmt.Sprintf("%s. %s\n", msg, detailsToString(details)))
+		if d := detailsToString(details); d != "" {
+			msg = fmt.Sprintf("%s. %s", msg, d)
+		}
+		message(pl.writer, fmt.Sprintf("%s\n", msg))
 		pl.mutex.Unlock()
 	}
 
@@ -68,7 +71,7 @@ func detailsToString(details []helpers.IDetails) string {
 	for i := range details {
 		s += fmt.Sprintf("%s: %s", details[i].Key(), details[i].Value())
 		if i < len(details)-1 {
-			s += ";"
+			s += "; "
 		}
 	}
 	return s
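A minimal, self-contained sketch (not part of the patch) of the pretty-logger behavior the two hunks above aim for: details are joined with "; " and the ". " separator is only added when there are details to print. The key/value pairs below are illustrative only, and plain strings stand in for the `helpers.IDetails` interface.

```go
package main

import "fmt"

// joinDetails mirrors the patched detailsToString: pairs are joined with "; "
// so that multiple details stay readable on a single line.
func joinDetails(pairs [][2]string) string {
	s := ""
	for i, p := range pairs {
		s += fmt.Sprintf("%s: %s", p[0], p[1])
		if i < len(pairs)-1 {
			s += "; "
		}
	}
	return s
}

func main() {
	msg := "failed to init host sensor"
	// As in the patched print(): only append the details when there are any,
	// so messages without details no longer end with a dangling ". ".
	if d := joinDetails([][2]string{{"namespace", "armo-kube-host-sensor"}, {"retries", "3"}}); d != "" {
		msg = fmt.Sprintf("%s. %s", msg, d)
	}
	fmt.Println(msg) // failed to init host sensor. namespace: armo-kube-host-sensor; retries: 3
}
```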
diff --git a/clihandler/cmd/download.go b/clihandler/cmd/download.go
index 8008554bed..8cd6b8fecb 100644
--- a/clihandler/cmd/download.go
+++ b/clihandler/cmd/download.go
@@ -2,6 +2,7 @@ package cmd
 
 import (
 	"fmt"
+	"path/filepath"
 	"strings"
 
 	"github.com/armosec/kubescape/cautils"
@@ -65,10 +66,16 @@ var downloadCmd = &cobra.Command{
 }
 
 func init() {
-	// cobra.OnInitialize(initConfig)
+	cobra.OnInitialize(initDownload)
 	rootCmd.AddCommand(downloadCmd)
 	downloadCmd.PersistentFlags().StringVarP(&downloadInfo.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")
 	downloadCmd.Flags().StringVarP(&downloadInfo.Path, "output", "o", "", "Output file. If not specified, will save in `~/.kubescape/.json`")
 }
+
+func initDownload() {
+	if filepath.Ext(downloadInfo.Path) == ".json" {
+		downloadInfo.Path, downloadInfo.FileName = filepath.Split(downloadInfo.Path)
+	}
+}
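As a quick illustration of what the new `initDownload` hook does with a `.json` output path (a standalone sketch, not part of the patch; the path below is made up):

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	output := "/tmp/artifacts/nsa.json" // hypothetical value passed via --output

	// Same check as initDownload: only split when the user pointed at a .json file.
	if filepath.Ext(output) == ".json" {
		dir, file := filepath.Split(output)
		fmt.Println(dir)  // /tmp/artifacts/
		fmt.Println(file) // nsa.json
	}
}
```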
diff --git a/clihandler/cmd/version.go b/clihandler/cmd/version.go
index e2e7bb6337..7881ce3a2e 100644
--- a/clihandler/cmd/version.go
+++ b/clihandler/cmd/version.go
@@ -1,8 +1,10 @@
 package cmd
 
 import (
+	"fmt"
+	"os"
+
 	"github.com/armosec/kubescape/cautils"
-	"github.com/armosec/kubescape/cautils/logger"
 	"github.com/spf13/cobra"
 )
 
@@ -13,7 +15,7 @@ var versionCmd = &cobra.Command{
 	RunE: func(cmd *cobra.Command, args []string) error {
 		v := cautils.NewIVersionCheckHandler()
 		v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, "", "", "version"))
-		logger.L().Info("Your current version is: " + cautils.BuildNumber)
+		fmt.Fprintln(os.Stdout, "Your current version is: "+cautils.BuildNumber)
 		return nil
 	},
 }
diff --git a/clihandler/initcli.go b/clihandler/initcli.go
index 784a1c9e9b..a3398fc137 100644
--- a/clihandler/initcli.go
+++ b/clihandler/initcli.go
@@ -36,6 +36,7 @@ type componentInterfaces struct {
 
 func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
+	// ================== setup k8s interface object ======================================
 	var k8s *k8sinterface.KubernetesApi
 	if scanInfo.GetScanningEnvironment() == cautils.ScanCluster {
 		k8s = getKubernetesApi()
@@ -44,11 +45,20 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
 		}
 	}
 
+	// ================== setup tenant object ======================================
+
 	tenantConfig := getTenantConfig(scanInfo.Account, scanInfo.KubeContext, k8s)
 
 	// Set submit behavior AFTER loading tenant config
 	setSubmitBehavior(scanInfo, tenantConfig)
 
+	// ================== version testing ======================================
+
+	v := cautils.NewIVersionCheckHandler()
+	v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, policyIdentifierNames(scanInfo.PolicyIdentifier), "", scanInfo.GetScanningEnvironment()))
+
+	// ================== setup host sensor object ======================================
+
 	hostSensorHandler := getHostSensorHandler(scanInfo, k8s)
 	if err := hostSensorHandler.Init(); err != nil {
 		logger.L().Error("failed to init host sensor", helpers.Error(err))
@@ -59,24 +69,29 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
 		scanInfo.ExcludedNamespaces = fmt.Sprintf("%s,%s", scanInfo.ExcludedNamespaces, hostSensorHandler.GetNamespace())
 	}
 
+	// ================== setup registry adaptors ======================================
+
 	registryAdaptors, err := resourcehandler.NewRegistryAdaptors()
 	if err != nil {
 		logger.L().Error("failed to initialize registry adaptors", helpers.Error(err))
 	}
 
+	// ================== setup resource collector object ======================================
+
 	resourceHandler := getResourceHandler(scanInfo, tenantConfig, k8s, hostSensorHandler, registryAdaptors)
 
+	// ================== setup reporter & printer objects ======================================
+
 	// reporting behavior - setup reporter
 	reportHandler := getReporter(tenantConfig, scanInfo.Submit)
 
-	v := cautils.NewIVersionCheckHandler()
-	v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, policyIdentifierNames(scanInfo.PolicyIdentifier), "", scanInfo.GetScanningEnvironment()))
-
 	// setup printer
 	printerHandler := printerv1.GetPrinter(scanInfo.Format, scanInfo.VerboseMode)
 	// printerHandler = printerv2.GetPrinter(scanInfo.Format, scanInfo.VerboseMode)
 	printerHandler.SetWriter(scanInfo.Output)
 
+	// ================== return interface ======================================
+
 	return componentInterfaces{
 		tenantConfig:    tenantConfig,
 		resourceHandler: resourceHandler,
diff --git a/clihandler/initcliutils.go b/clihandler/initcliutils.go
index cb1807e9dc..a03a5d6243 100644
--- a/clihandler/initcliutils.go
+++ b/clihandler/initcliutils.go
@@ -8,6 +8,7 @@ import (
 	"github.com/armosec/kubescape/cautils"
 	"github.com/armosec/kubescape/cautils/getter"
 	"github.com/armosec/kubescape/cautils/logger"
+	"github.com/armosec/kubescape/cautils/logger/helpers"
 	"github.com/armosec/kubescape/hostsensorutils"
 	"github.com/armosec/kubescape/resourcehandler"
 	"github.com/armosec/kubescape/resultshandling/reporter"
@@ -58,6 +59,7 @@ func getReporter(tenantConfig cautils.ITenantConfig, submit bool) reporter.IRepo
 
 func getResourceHandler(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantConfig, k8s *k8sinterface.KubernetesApi, hostSensorHandler hostsensorutils.IHostSensor, registryAdaptors *resourcehandler.RegistryAdaptors) resourcehandler.IResourceHandler {
 	if len(scanInfo.InputPatterns) > 0 || k8s == nil {
+		// scanInfo.HostSensor.SetBool(false)
 		return resourcehandler.NewFileResourceHandler(scanInfo.InputPatterns, registryAdaptors)
 	}
 	getter.GetArmoAPIConnector()
@@ -197,7 +199,7 @@ func getConfigInputsGetter(ControlsInputs string, accountID string, downloadRele
 func getDownloadReleasedPolicy(downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IPolicyGetter {
 	if err := downloadReleasedPolicy.SetRegoObjects(); err != nil {
 		// if failed to pull policy, fallback to cache
-		cautils.WarningDisplay(os.Stderr, "Warning: failed to get policies from github release, loading policies from cache\n")
+		logger.L().Warning("failed to get policies from github release, loading policies from cache", helpers.Error(err))
 		return getter.NewLoadPolicy(getDefaultFrameworksPaths())
 	} else {
 		return downloadReleasedPolicy
diff --git a/docs/summary.png b/docs/summary.png
index 2449b98a92..565d0ae2f6 100644
Binary files a/docs/summary.png and b/docs/summary.png differ
diff --git a/go.mod b/go.mod
index 34e8d788e0..35cdc24cdd 100644
--- a/go.mod
+++ b/go.mod
@@ -5,7 +5,7 @@ go 1.17
 require (
 	github.com/armosec/armoapi-go v0.0.49
 	github.com/armosec/k8s-interface v0.0.60
-	github.com/armosec/opa-utils v0.0.107
+	github.com/armosec/opa-utils v0.0.110
 	github.com/armosec/rbac-utils v0.0.14
 	github.com/armosec/utils-go v0.0.3
 	github.com/armosec/utils-k8s-go v0.0.1
diff --git a/go.sum b/go.sum
index b28bd6fe0c..c1d4f78fd1 100644
--- a/go.sum
+++ b/go.sum
@@ -93,8 +93,8 @@ github.com/armosec/k8s-interface v0.0.50/go.mod h1:vHxGWqD/uh6+GQb9Sqv7OGMs+Rvc2
 github.com/armosec/k8s-interface v0.0.60 h1:jTCiO15QQbHVuxFQ928rp4srf1rQoUzeybfcbv/cuss=
 github.com/armosec/k8s-interface v0.0.60/go.mod h1:g0jv/fG+VqpT5ivO6D2gJcJ/w68BiffDz+PcU9YFbL4=
 github.com/armosec/opa-utils v0.0.64/go.mod h1:6tQP8UDq2EvEfSqh8vrUdr/9QVSCG4sJfju1SXQOn4c=
-github.com/armosec/opa-utils v0.0.107 h1:P+SACquUDMbXcOYIbQ+uzwcdJlrguXOTI42PHEJG2NU=
-github.com/armosec/opa-utils v0.0.107/go.mod h1:Wc1P4gkB6UQeGW8I76zCuitGGl15Omp0bKw7N0tR9dk=
+github.com/armosec/opa-utils v0.0.110 h1:qncGcbnYjiGULP3yK+4geRNNpRoWqKXQL+Xg+iXc1cM=
+github.com/armosec/opa-utils v0.0.110/go.mod h1:Wc1P4gkB6UQeGW8I76zCuitGGl15Omp0bKw7N0tR9dk=
 github.com/armosec/rbac-utils v0.0.1/go.mod h1:pQ8CBiij8kSKV7aeZm9FMvtZN28VgA7LZcYyTWimq40=
 github.com/armosec/rbac-utils v0.0.14 h1:CKYKcgqJEXWF2Hen/B1pVGtS3nDAG1wp9dDv6oNtq90=
 github.com/armosec/rbac-utils v0.0.14/go.mod h1:Ex/IdGWhGv9HZq6Hs8N/ApzCKSIvpNe/ETqDfnuyah0=
diff --git a/hostsensorutils/hostsensor.yaml b/hostsensorutils/hostsensor.yaml
index 3261b83fac..bc03b94cdf 100644
--- a/hostsensorutils/hostsensor.yaml
+++ b/hostsensorutils/hostsensor.yaml
@@ -42,7 +42,7 @@ spec:
             containerPort: 7888
         resources:
           limits:
-            cpu: 1m
+            cpu: 0.1m
             memory: 200Mi
           requests:
             cpu: 1m
diff --git a/hostsensorutils/hostsensordeploy.go b/hostsensorutils/hostsensordeploy.go
index 1d029f1a99..ca919dd8e0 100644
--- a/hostsensorutils/hostsensordeploy.go
+++ b/hostsensorutils/hostsensordeploy.go
@@ -27,13 +27,14 @@ var (
 )
 
 type HostSensorHandler struct {
-	HostSensorPort     int32
-	HostSensorPodNames map[string]string //map from pod names to node names
-	IsReady            <-chan bool       //readonly chan
-	k8sObj             *k8sinterface.KubernetesApi
-	DaemonSet          *appsv1.DaemonSet
-	podListLock        sync.RWMutex
-	gracePeriod        int64
+	HostSensorPort              int32
+	HostSensorPodNames          map[string]string //map from pod names to node names
+	HostSensorUnshedulePodNames map[string]string //map from pod names to node names
+	IsReady                     <-chan bool       //readonly chan
+	k8sObj                      *k8sinterface.KubernetesApi
+	DaemonSet                   *appsv1.DaemonSet
+	podListLock                 sync.RWMutex
+	gracePeriod                 int64
 }
 
 func NewHostSensorHandler(k8sObj *k8sinterface.KubernetesApi) (*HostSensorHandler, error) {
@@ -42,9 +43,10 @@ func NewHostSensorHandler(k8sObj *k8sinterface.KubernetesApi) (*HostSensorHandle
 		return nil, fmt.Errorf("nil k8s interface received")
 	}
 	hsh := &HostSensorHandler{
-		k8sObj:             k8sObj,
-		HostSensorPodNames: map[string]string{},
-		gracePeriod:        int64(15),
+		k8sObj:                      k8sObj,
+		HostSensorPodNames:          map[string]string{},
+		HostSensorUnshedulePodNames: map[string]string{},
+		gracePeriod:                 int64(15),
 	}
 	// Don't deploy on cluster with no nodes. Some cloud providers prevents termination of K8s objects for cluster with no nodes!!!
 	if nodeList, err := k8sObj.KubernetesClient.CoreV1().Nodes().List(k8sObj.Context, metav1.ListOptions{}); err != nil || len(nodeList.Items) == 0 {
@@ -140,12 +142,17 @@ func (hsh *HostSensorHandler) checkPodForEachNode() error {
 		}
 		hsh.podListLock.RLock()
 		podsNum := len(hsh.HostSensorPodNames)
+		unschedPodNum := len(hsh.HostSensorUnshedulePodNames)
 		hsh.podListLock.RUnlock()
-		if len(nodesList.Items) == podsNum {
+		if len(nodesList.Items) <= podsNum+unschedPodNum {
 			break
 		}
 		if time.Now().After(deadline) {
-			return fmt.Errorf("host-sensor pods number (%d) differ than nodes number (%d) after deadline exceded", podsNum, len(nodesList.Items))
+			hsh.podListLock.RLock()
+			podsMap := hsh.HostSensorPodNames
+			hsh.podListLock.RUnlock()
+			return fmt.Errorf("host-sensor pods number (%d) differs from nodes number (%d) after deadline exceeded. We will take data only from the pods below: %v",
+				podsNum, len(nodesList.Items), podsMap)
 		}
 		time.Sleep(100 * time.Millisecond)
 	}
@@ -156,12 +163,17 @@
 func (hsh *HostSensorHandler) populatePodNamesToNodeNames() {
 	go func() {
-		watchRes, err := hsh.k8sObj.KubernetesClient.CoreV1().Pods(hsh.DaemonSet.Namespace).Watch(hsh.k8sObj.Context, metav1.ListOptions{
+		var watchRes watch.Interface
+		var err error
+		watchRes, err = hsh.k8sObj.KubernetesClient.CoreV1().Pods(hsh.DaemonSet.Namespace).Watch(hsh.k8sObj.Context, metav1.ListOptions{
 			Watch:         true,
 			LabelSelector: fmt.Sprintf("name=%s", hsh.DaemonSet.Spec.Template.Labels["name"]),
 		})
 		if err != nil {
-			logger.L().Error("failed to watch over daemonset pods", helpers.Error(err))
+			logger.L().Error("failed to watch over daemonset pods - are we missing watch pods permissions?", helpers.Error(err))
+		}
+		if watchRes == nil {
+			return
 		}
 		for eve := range watchRes.ResultChan() {
 			pod, ok := eve.Object.(*corev1.Pod)
@@ -179,10 +191,31 @@ func (hsh *HostSensorHandler) updatePodInListAtomic(eventType watch.EventType, p
 
 	switch eventType {
 	case watch.Added, watch.Modified:
-		if podObj.Status.Phase == corev1.PodRunning && podObj.Status.ContainerStatuses[0].Ready {
+		if podObj.Status.Phase == corev1.PodRunning && len(podObj.Status.ContainerStatuses) > 0 &&
+			podObj.Status.ContainerStatuses[0].Ready {
 			hsh.HostSensorPodNames[podObj.ObjectMeta.Name] = podObj.Spec.NodeName
+			delete(hsh.HostSensorUnshedulePodNames, podObj.ObjectMeta.Name)
 		} else {
-			delete(hsh.HostSensorPodNames, podObj.ObjectMeta.Name)
+			if podObj.Status.Phase == corev1.PodPending && len(podObj.Status.Conditions) > 0 &&
+				podObj.Status.Conditions[0].Reason == corev1.PodReasonUnschedulable {
+				nodeName := ""
+				if podObj.Spec.Affinity != nil && podObj.Spec.Affinity.NodeAffinity != nil &&
+					podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil &&
+					len(podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms) > 0 &&
+					len(podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields) > 0 &&
+					len(podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields[0].Values) > 0 {
+					nodeName = podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields[0].Values[0]
+				}
+				logger.L().Warning("One host-sensor pod cannot be scheduled on its node. Data from this node will not be collected",
+					helpers.String("message", podObj.Status.Conditions[0].Message),
+					helpers.String("nodeName", nodeName),
+					helpers.String("podName", podObj.ObjectMeta.Name))
+				if nodeName != "" {
+					hsh.HostSensorUnshedulePodNames[podObj.ObjectMeta.Name] = nodeName
+				}
+			} else {
+				delete(hsh.HostSensorPodNames, podObj.ObjectMeta.Name)
+			}
 		}
 	default:
 		delete(hsh.HostSensorPodNames, podObj.ObjectMeta.Name)
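To make the new unschedulable-pod bookkeeping above easier to review, here is a condensed, standalone sketch (not part of the patch) of the condition the watcher now keys on; the real code additionally walks the node-affinity match fields to recover the node name:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// isUnschedulable mirrors the check added to updatePodInListAtomic: a pending pod
// whose first condition carries the Unschedulable reason will never become Ready,
// so the host sensor should not keep waiting for it.
func isUnschedulable(pod *corev1.Pod) bool {
	return pod.Status.Phase == corev1.PodPending &&
		len(pod.Status.Conditions) > 0 &&
		pod.Status.Conditions[0].Reason == corev1.PodReasonUnschedulable
}

func main() {
	pod := &corev1.Pod{}
	pod.Status.Phase = corev1.PodPending
	pod.Status.Conditions = []corev1.PodCondition{{
		Type:   corev1.PodScheduled,
		Status: corev1.ConditionFalse,
		Reason: corev1.PodReasonUnschedulable, // "Unschedulable"
	}}
	fmt.Println(isUnschedulable(pod)) // true
}
```

Counting these pods separately is what lets `checkPodForEachNode` stop waiting once every node is covered by either a running or a provably unschedulable pod.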
diff --git a/hostsensorutils/hostsensorgetfrompod.go b/hostsensorutils/hostsensorgetfrompod.go
index e1cec0bb66..47e63fa8c3 100644
--- a/hostsensorutils/hostsensorgetfrompod.go
+++ b/hostsensorutils/hostsensorgetfrompod.go
@@ -157,7 +157,7 @@ func (hsh *HostSensorHandler) CollectResources() ([]hostsensor.HostSensorDataEnv
 		return res, nil
 	}
-	logger.L().Info("Accessing host sensor")
+	logger.L().Debug("Accessing host sensor")
 	cautils.StartSpinner()
 	defer cautils.StopSpinner()
 	kcData, err := hsh.GetKubeletConfigurations()
@@ -197,6 +197,6 @@ func (hsh *HostSensorHandler) CollectResources() ([]hostsensor.HostSensorDataEnv
 	res = append(res, kcData...)
 
 	// finish
-	logger.L().Success("Read host information from host sensor")
+	logger.L().Debug("Done reading information from host sensor")
 	return res, nil
 }
diff --git a/install.sh b/install.sh
index 452335f732..741ae52aa8 100755
--- a/install.sh
+++ b/install.sh
@@ -54,6 +54,6 @@ echo -e "\033[0m"
 
 $KUBESCAPE_EXEC version
 echo
-echo -e "\033[35mUsage: $ $KUBESCAPE_EXEC scan --submit"
+echo -e "\033[35mUsage: $ $KUBESCAPE_EXEC scan --submit --enable-host-scan"
 echo -e "\033[0m"
 
diff --git a/resourcehandler/ekssupport.go b/resourcehandler/ekssupport.go
index 875798134b..21efc22192 100644
--- a/resourcehandler/ekssupport.go
+++ b/resourcehandler/ekssupport.go
@@ -58,15 +58,18 @@ func NewEKSProviderContext() *EKSProviderContext {
 }
 
 func (eksProviderContext *EKSProviderContext) getKubeClusterName() string {
-	cluster := k8sinterface.GetCurrentContext().Cluster
-	var splittedCluster []string
+	context := k8sinterface.GetCurrentContext()
+	if context == nil {
+		return ""
+	}
+	cluster := context.Cluster
 	if cluster != "" {
-		splittedCluster = strings.Split(cluster, ".")
+		splittedCluster := strings.Split(cluster, ".")
 		if len(splittedCluster) > 1 {
 			return splittedCluster[0]
 		}
 	}
-	splittedCluster = strings.Split(k8sinterface.GetClusterName(), ".")
+	splittedCluster := strings.Split(k8sinterface.GetClusterName(), ".")
 	if len(splittedCluster) > 1 {
 		return splittedCluster[0]
 	}
@@ -78,9 +81,8 @@ func (eksProviderContext *EKSProviderContext) getKubeCluster() string {
 	if context == nil {
 		return ""
 	}
-	cluster := context.Cluster
-	if cluster != "" {
-		return cluster
+	if context.Cluster != "" {
+		return context.Cluster
 	}
 	return k8sinterface.GetClusterName()
 }
diff --git a/resourcehandler/gkesupport.go b/resourcehandler/gkesupport.go
index 0dda579e9f..07dd0c615b 100644
--- a/resourcehandler/gkesupport.go
+++ b/resourcehandler/gkesupport.go
@@ -89,8 +89,7 @@ func (gkeProviderContext *GKEProviderContext) getKubeClusterName() string {
 	if len(parsedName) < 3 {
 		return ""
 	}
-	clusterName = parsedName[3]
-	return clusterName
+	return parsedName[3]
 }
 
 func (gkeProviderContext *GKEProviderContext) getKubeCluster() string {
@@ -98,9 +97,8 @@ func (gkeProviderContext *GKEProviderContext) getKubeCluster() string {
 	if context == nil {
 		return ""
 	}
-	cluster := context.Cluster
-	if cluster != "" {
-		return cluster
+	if context.Cluster != "" {
+		return context.Cluster
 	}
 
 	return k8sinterface.GetClusterName()
diff --git a/resourcehandler/k8sresources.go b/resourcehandler/k8sresources.go
index c37b718795..2e41daea56 100644
--- a/resourcehandler/k8sresources.go
+++ b/resourcehandler/k8sresources.go
@@ -80,7 +80,7 @@ func (k8sHandler *K8sResourceHandler) GetResources(frameworks []reporthandling.F
 	}
 
 	cautils.StopSpinner()
-	logger.L().Success("Accessed successfully to Kubernetes objects")
+	logger.L().Success("Accessed Kubernetes objects")
 
 	return k8sResourcesMap, allResources, nil
 }
diff --git a/resourcehandler/urlloader.go b/resourcehandler/urlloader.go
index 9c1761142f..27b2a33219 100644
--- a/resourcehandler/urlloader.go
+++ b/resourcehandler/urlloader.go
@@ -28,11 +28,11 @@ func listUrls(patterns []string) []string {
 	urls := []string{}
 	for i := range patterns {
 		if strings.HasPrefix(patterns[i], "http") {
-			if !isYaml(patterns[i]) || !isJson(patterns[i]) { // if url of repo
+			if !isYaml(patterns[i]) && !isJson(patterns[i]) { // if url of repo
 				if yamls, err := ScanRepository(patterns[i], ""); err == nil { // TODO - support branch
 					urls = append(urls, yamls...)
 				} else {
-					fmt.Print(err) // TODO - handle errors
+					logger.L().Error(err.Error())
 				}
 			} else { // url of single file
 				urls = append(urls, patterns[i])
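The urlloader change above swaps `||` for `&&`; a tiny standalone truth-table sketch (not part of the patch, with stand-ins for the real `isYaml`/`isJson` helpers) shows why the original condition misclassified plain YAML/JSON URLs as repository URLs:

```go
package main

import (
	"fmt"
	"strings"
)

// Stand-ins for the real isYaml/isJson helpers: suffix checks are enough here.
func isYaml(u string) bool { return strings.HasSuffix(u, ".yaml") || strings.HasSuffix(u, ".yml") }
func isJson(u string) bool { return strings.HasSuffix(u, ".json") }

func main() {
	u := "https://example.com/deployment.yaml" // hypothetical single-file URL

	// Old condition: true for every URL that is not both YAML and JSON at once,
	// so a plain .yaml URL was sent down the "scan the whole repository" path.
	old := !isYaml(u) || !isJson(u)

	// New condition: only URLs that are neither YAML nor JSON are treated as repositories.
	repo := !isYaml(u) && !isJson(u)

	fmt.Println(old, repo) // true false
}
```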