From 56a05dd8974215f0afdbe00610a5d1dea74b76de Mon Sep 17 00:00:00 2001 From: Matthieu Huin Date: Tue, 12 Dec 2023 20:33:28 +0100 Subject: [PATCH] CLI: add "nodepool get" subcommands The following subcommands are available: * main.go nodepool get providers-secrets * main.go nodepool get builder-ssh-key Change-Id: I897d92c64a172aae2d1b8f44b8b31203d648d989 --- cli/cmd/nodepool.go | 140 ++++++++++++++++++ cli/cmd/root.go | 80 ++++++++++ doc/deployment/nodepool.md | 6 + doc/reference/cli/index.md | 3 +- doc/reference/cli/main.md | 50 ++++++- main.go | 3 + .../tasks/main.yaml | 4 +- .../tasks/main.yaml | 2 +- 8 files changed, 280 insertions(+), 8 deletions(-) create mode 100644 cli/cmd/nodepool.go diff --git a/cli/cmd/nodepool.go b/cli/cmd/nodepool.go new file mode 100644 index 00000000..9c208530 --- /dev/null +++ b/cli/cmd/nodepool.go @@ -0,0 +1,140 @@ +// Copyright (C) 2023 Red Hat +// SPDX-License-Identifier: Apache-2.0 + +package cmd + +/* +"nodepool" subcommands can be used to interact with and configure the Nodepool component of a SF deployment. 
+*/ + +import ( + "context" + "errors" + "os" + + apiv1 "k8s.io/api/core/v1" + + "github.com/softwarefactory-project/sf-operator/controllers" + "github.com/spf13/cobra" + ctrl "sigs.k8s.io/controller-runtime" +) + +func get(kmd *cobra.Command, args []string) { + cliCtx, err := GetCLIContext(kmd) + if err != nil { + ctrl.Log.Error(err, "Error initializing:") + os.Exit(1) + } + argumentError := errors.New("argument must be in: providers-secrets, builder-ssh-key") + if len(args) != 1 { + ctrl.Log.Error(argumentError, "Need one argument") + os.Exit(1) + } + target := args[0] + ns := cliCtx.Namespace + kubeContext := cliCtx.KubeContext + if target == "providers-secrets" { + cloudsFile, _ := kmd.Flags().GetString("clouds") + if cloudsFile == "" { + cloudsFile = cliCtx.Components.Nodepool.CloudsFile + } + kubeFile, _ := kmd.Flags().GetString("kube") + if kubeFile == "" { + kubeFile = cliCtx.Components.Nodepool.KubeFile + } + getProvidersSecret(ns, kubeContext, cloudsFile, kubeFile) + } else if target == "builder-ssh-key" { + pubKey, _ := kmd.Flags().GetString("pubkey") + getBuilderSSHKey(ns, kubeContext, pubKey) + } else { + ctrl.Log.Error(argumentError, "Unknown argument "+target) + os.Exit(1) + } +} + +func getProvidersSecret(ns string, kubeContext string, cloudsFile string, kubeFile string) { + sfEnv := ENV{ + Cli: CreateKubernetesClientOrDie(kubeContext), + Ctx: context.TODO(), + Ns: ns, + } + var secret apiv1.Secret + if GetMOrDie(&sfEnv, controllers.NodepoolProvidersSecretsName, &secret) { + if len(secret.Data["clouds.yaml"]) > 0 { + if cloudsFile == "" { + println("clouds.yaml:") + println(string(secret.Data["clouds.yaml"])) + } else { + // TODO before we write to file, we should ensure the file, if it exists, is older than + // the upstream secret to avoid losing more recent secrets. 
+ os.WriteFile(cloudsFile, secret.Data["clouds.yaml"], 0600) + ctrl.Log.Info("File " + cloudsFile + " updated") + } + } + if len(secret.Data["kube.config"]) > 0 { + if kubeFile == "" { + println("kube.config:") + println(string(secret.Data["kube.config"])) + } else { + os.WriteFile(kubeFile, secret.Data["kube.config"], 0644) + ctrl.Log.Info("File " + kubeFile + " updated") + } + } + } else { + ctrl.Log.Error(errors.New("Secret "+controllers.NodepoolProvidersSecretsName+" not found in namespace "+ns), + "Error fetching providers secrets") + os.Exit(1) + } +} + +func getBuilderSSHKey(ns string, kubeContext string, pubKey string) { + sfEnv := ENV{ + Cli: CreateKubernetesClientOrDie(kubeContext), + Ctx: context.TODO(), + Ns: ns, + } + var secret apiv1.Secret + if GetMOrDie(&sfEnv, "nodepool-builder-ssh-key", &secret) { + if pubKey == "" { + println(string(secret.Data["pub"])) + } else { + os.WriteFile(pubKey, secret.Data["pub"], 0600) + ctrl.Log.Info("File " + pubKey + " saved") + } + } else { + ctrl.Log.Error(errors.New("Secret nodepool-builder-ssh-key not found in namespace "+ns), + "Error fetching builder SSH key") + os.Exit(1) + } +} + +func MkNodepoolCmd() *cobra.Command { + + var ( + cloudsOutput string + kubeconfigOutput string + builderPubKey string + + nodepoolCmd = &cobra.Command{ + Use: "nodepool", + Short: "Nodepool subcommands", + Long: `These subcommands can be used to interact with the Nodepool component of a Software Factory deployment.`, + } + createCmd, configureCmd, getCmd = GetCRUDSubcommands() + ) + + getCmd.Run = get + getCmd.Use = "get {providers-secrets, builder-ssh-key}" + getCmd.Long = "Get a Nodepool resource. The resource can be the providers secrets or the builder's public SSH key." 
+ getCmd.ValidArgs = []string{"providers-secrets", "builder-ssh-key"} + getCmd.Flags().StringVar(&cloudsOutput, "clouds", "", "(use with providers-secrets) File where to dump the clouds secrets") + getCmd.Flags().StringVar(&kubeconfigOutput, "kube", "", "(use with providers-secrets) File where to dump the kube secrets") + getCmd.Flags().StringVar(&builderPubKey, "pubkey", "", "(use with builder-ssh-key) File where to dump nodepool-builder's SSH public key") + + createCmd.AddCommand(getCmd) + + nodepoolCmd.AddCommand(createCmd) + nodepoolCmd.AddCommand(configureCmd) + nodepoolCmd.AddCommand(getCmd) + return nodepoolCmd +} diff --git a/cli/cmd/root.go b/cli/cmd/root.go index 50cfdaea..1c28c675 100644 --- a/cli/cmd/root.go +++ b/cli/cmd/root.go @@ -18,11 +18,27 @@ limitations under the License. package cmd import ( + "context" "errors" + "os" + apierrors "k8s.io/apimachinery/pkg/api/errors" + + apiroutev1 "github.com/openshift/api/route/v1" "github.com/spf13/cobra" "github.com/spf13/viper" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" ctrl "sigs.k8s.io/controller-runtime" + + "sigs.k8s.io/controller-runtime/pkg/client" + + monitoring "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + + opv1 "github.com/operator-framework/api/pkg/operators/v1alpha1" + sfv1 "github.com/softwarefactory-project/sf-operator/api/v1" + controllers "github.com/softwarefactory-project/sf-operator/controllers" ) // CLI config struct @@ -104,6 +120,10 @@ func GetCLIContext(command *cobra.Command) (SoftwareFactoryConfigContext, error) if cliContext.Namespace == "" { cliContext.Namespace = ns } + kubeContext, _ := command.Flags().GetString("kube-context") + if cliContext.KubeContext == "" { + cliContext.KubeContext = kubeContext + } fqdn, _ := command.Flags().GetString("fqdn") if fqdn == "" { fqdn = "sfop.me" @@ -113,3 +133,63 @@ func GetCLIContext(command *cobra.Command) 
(SoftwareFactoryConfigContext, error) } return cliContext, nil } + +func GetCRUDSubcommands() (*cobra.Command, *cobra.Command, *cobra.Command) { + createCmd := &cobra.Command{ + Use: "create", + Short: "Create a resource", + } + configureCmd := &cobra.Command{ + Use: "configure", + Short: "Configure a resource", + } + getCmd := &cobra.Command{ + Use: "get", + Short: "Get a resource", + } + return createCmd, configureCmd, getCmd +} + +// Moving code from cli/sfconfig/cmd/utils/utils.go as we need it to avoid dead code +type ENV struct { + Cli client.Client + Ns string + Ctx context.Context +} + +func CreateKubernetesClient(contextName string) (client.Client, error) { + scheme := runtime.NewScheme() + monitoring.AddToScheme(scheme) + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(apiroutev1.AddToScheme(scheme)) + utilruntime.Must(opv1.AddToScheme(scheme)) + utilruntime.Must(sfv1.AddToScheme(scheme)) + var conf = controllers.GetConfigContextOrDie(contextName) + return client.New(conf, client.Options{ + Scheme: scheme, + }) +} + +func CreateKubernetesClientOrDie(contextName string) client.Client { + cli, err := CreateKubernetesClient(contextName) + if err != nil { + ctrl.Log.Error(err, "Error creating Kubernetes client") + os.Exit(1) + } + return cli +} + +func GetMOrDie(env *ENV, name string, obj client.Object) bool { + err := env.Cli.Get(env.Ctx, + client.ObjectKey{ + Name: name, + Namespace: env.Ns, + }, obj) + if apierrors.IsNotFound(err) { + return false + } else if err != nil { + ctrl.Log.Error(err, "Error while fetching object "+name) + os.Exit(1) + } + return true +} diff --git a/doc/deployment/nodepool.md b/doc/deployment/nodepool.md index 0e5c6153..1a43d91f 100644 --- a/doc/deployment/nodepool.md +++ b/doc/deployment/nodepool.md @@ -86,6 +86,12 @@ Here is the command to fetch the builder SSH public key: kubectl get secret nodepool-builder-ssh-key -n sf -o jsonpath={.data.pub} | base64 -d ``` +or + +```sh +go run ./main.go --namespace 
sf nodepool get builder-ssh-key +``` + ## Accept an image-builder's SSH Host key Once an account has been created to an `image-builder` host the `nodepool-builder` must trust the SSH Host key before being able to connect. Run the following command to initiate a SSH connection and trust the host key: diff --git a/doc/reference/cli/index.md b/doc/reference/cli/index.md index 3a7a6cf6..41277cdd 100644 --- a/doc/reference/cli/index.md +++ b/doc/reference/cli/index.md @@ -1,7 +1,6 @@ # sfconfig -> [!NOTE] -> As per [ADR12](../../adr/0012-CLI-overhaul.md) this CLI is getting phased out. +> ⚠️ As per [ADR12](../../adr/0012-CLI-overhaul.md) this CLI is getting phased out. > Its features are being reworked into the `main` binary of the operator. > Temporary documentation on this Work-In-Progress can be found [here](./main.md). diff --git a/doc/reference/cli/main.md b/doc/reference/cli/main.md index 4bcbc630..496c8bcb 100644 --- a/doc/reference/cli/main.md +++ b/doc/reference/cli/main.md @@ -10,8 +10,9 @@ deployments, beyond what can be defined in a custom resource manifest. 1. [Configuration File](#configuration-file) 1. [Subcommands](#subcommands) 1. [Apply](#apply) - 1. [Operator](#apply) 1. [Backup](#backup) + 1. [Nodepool](#nodepool) + 1. [Operator](#apply) 1. [Restore](#restore) ## Running the CLI @@ -29,6 +30,7 @@ These flags apply | Argument | Type | Description | Optional | Default | |----------|------|-------|----|----| |-n, --namespace |string | The namespace on which to perform actions | Dependent |-| +|-k, --kube-context |string | The cluster context on which to operate | Dependent |-| |-d, --fqdn | string | The FQDN of the deployment (if no manifest is provided) | Yes | sfop.me | |-C, --config | string | Path to the CLI configuration file | Yes | - | |-c, --context | string | Context to use in the configuration file. 
Defaults to the "default-context" value in the config file if set, or the first available context | Yes | Dependent | @@ -105,6 +107,49 @@ Flags: |----------|------|-------|----|----| |--cr |string | The path to the custom resource to apply | No | If a config file is used and the flag not provided, will default to the context's `manifest-file` if set | +### Backup + +Not implemented yet + +### Nodepool + +The `nodepool` subcommand can be used to interact with the Nodepool component of a Software Factory deployment. + +#### get builder-ssh-key + +The Nodepool builder component should be used with at least one `image-builder` companion machine. +It must have the capability to connect via SSH to the builder machine(s). In order to do so, you need +to install the builder's SSH public key as an authorized key on the builder machine(s). This subcommand +fetches that key and can save it to a specified file path. + +```sh +go run ./main.go [GLOBAL FLAGS] nodepool get builder-ssh-key [--pubkey /path/to/key] +``` + +Flags: + +| Argument | Type | Description | Optional | Default | |----------|------|-------|----|----| | --pubkey | string | The destination file where to save the builder's public key | yes | - | + +#### get providers-secrets + +Get the currently set providers secrets (OpenStack's clouds.yaml and Kubernetes/OpenShift's kube.config) and optionally +write the secrets to a local file. + +> ⚠️ The local files will be overwritten with the downloaded contents without warning! 
+ +```sh +go run ./main.go [GLOBAL FLAGS] nodepool get providers-secrets [--kube /path/to/kube.config --clouds /path/to/clouds.yaml] +``` + +Flags: + +| Argument | Type | Description | Optional | Default | +|----------|------|-------|----|----| +| --kube | string | The destination file where to save nodepool's kube.config | yes | - | +| --clouds | string | The destination file where to save nodepool's clouds.yaml | yes | - | + ### Operator To start the operator controller locally, run: @@ -120,9 +165,6 @@ Flags: |--metrics-bind-address |string | The address the metric endpoint binds to. | Yes | :8080 | |--health-probe-bind-address |string | The address the probe endpoint binds to. | Yes | :8081 | |--leader-elect |boolean | Enable leader election for controller manager. | Yes | false | -### Backup - -Not implemented yet ### Restore diff --git a/main.go b/main.go index 19ddd3f5..8392ff2e 100644 --- a/main.go +++ b/main.go @@ -60,6 +60,7 @@ func main() { enableLeaderElection bool probeAddr string ns string + kubeContext string fqdn string cliContext string configFile string @@ -79,6 +80,7 @@ func main() { // Global flags rootCmd.PersistentFlags().StringVarP(&ns, "namespace", "n", "", "The namespace on which to perform actions.") + rootCmd.PersistentFlags().StringVarP(&kubeContext, "kube-context", "k", "", "The cluster context to use to perform calls to the K8s API.") rootCmd.PersistentFlags().StringVarP(&fqdn, "fqdn", "d", "", "The FQDN of the deployment (if no manifest is provided).") rootCmd.PersistentFlags().StringVarP(&configFile, "config", "C", "", "Path to the CLI configuration file.") rootCmd.PersistentFlags().StringVarP(&cliContext, "context", "c", "", "Context to use in the configuration file. 
Defaults to the \"default-context\" value in the config file if set, or the first available context in the config file.") @@ -96,6 +98,7 @@ func main() { cmd.MkApplyCmd(), cmd.MkBackupCmd(), cmd.MkRestoreCmd(), + cmd.MkNodepoolCmd(), } for _, c := range subcommands { rootCmd.AddCommand(c) diff --git a/roles/health-check/config-update-nodepool-builder/tasks/main.yaml b/roles/health-check/config-update-nodepool-builder/tasks/main.yaml index 68e55bd0..7f067cca 100644 --- a/roles/health-check/config-update-nodepool-builder/tasks/main.yaml +++ b/roles/health-check/config-update-nodepool-builder/tasks/main.yaml @@ -41,7 +41,9 @@ # As nodepool builder will connected to the image-builder node (which is the microshift node in CI usecase) # here we ensure that the nodepool-builder pod can connect - name: Get nodepool-builder public SSH key - shell: kubectl get secret nodepool-builder-ssh-key -n sf -o jsonpath={.data.pub} | base64 -d + ansible.builtin.command: go run ./main.go --namespace sf nodepool get builder-ssh-key + args: + chdir: "{{ zuul.project.src_dir }}" register: nodepool_get_key - name: Ensure nodepool user available on the controller diff --git a/roles/health-check/test-nodepool-providers-secrets/tasks/main.yaml b/roles/health-check/test-nodepool-providers-secrets/tasks/main.yaml index 5f9645fc..e94274c5 100644 --- a/roles/health-check/test-nodepool-providers-secrets/tasks/main.yaml +++ b/roles/health-check/test-nodepool-providers-secrets/tasks/main.yaml @@ -6,7 +6,7 @@ - name: Dump current secrets from nodepool command: > - tools/sfconfig --config /tmp/sfconfig.yaml nodepool-providers-secrets --dump + go run ./main.go --namespace sf nodepool get providers-secrets --clouds /tmp/clouds.yaml --kube /tmp/kubeconfig.yaml args: chdir: "{{ zuul.project.src_dir }}"