diff --git a/cmd/cluster/cmd.go b/cmd/cluster/cmd.go
index 35e5e1a5e..46b6b065e 100644
--- a/cmd/cluster/cmd.go
+++ b/cmd/cluster/cmd.go
@@ -31,6 +31,7 @@ func NewCmdCluster(streams genericclioptions.IOStreams, client *k8s.LazyClient,
 	clusterCmd.AddCommand(newCmdResync())
 	clusterCmd.AddCommand(newCmdContext())
 	clusterCmd.AddCommand(newCmdTransferOwner(streams, globalOpts))
+	clusterCmd.AddCommand(newCmdUpdatePullSecret(streams, globalOpts))
 	clusterCmd.AddCommand(access.NewCmdAccess(streams, client))
 	clusterCmd.AddCommand(newCmdCpd())
 	clusterCmd.AddCommand(newCmdCheckBannedUser())
diff --git a/cmd/cluster/transferowner.go b/cmd/cluster/transferowner.go
index 07317329e..6897ed2ff 100644
--- a/cmd/cluster/transferowner.go
+++ b/cmd/cluster/transferowner.go
@@ -1,15 +1,22 @@
 package cluster
 
+/*
+ * Relevant documentation:
+ * https://github.com/openshift/ops-sop/blob/master/v4/howto/transfer_cluster_ownership.md
+ */
+
 import (
 	"bufio"
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"math/rand"
 	"os"
 	"strings"
 	"time"
 
+	"github.com/fatih/color"
 	cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1"
 	hiveapiv1 "github.com/openshift/hive/apis/hive/v1"
 	hiveinternalv1alpha1 "github.com/openshift/hive/apis/hiveinternal/v1alpha1"
@@ -33,6 +40,8 @@ import (
 	"github.com/openshift/osdctl/internal/utils/globalflags"
 	"github.com/openshift/osdctl/pkg/utils"
 
+	"sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/log/zap"
 )
 
 const (
@@ -40,30 +49,50 @@ const (
 	SL_TRANSFER_INITIATED = "https://raw.githubusercontent.com/openshift/managed-notifications/refs/heads/master/osd/clustertransfer_starting.json"
 	SL_TRANSFER_COMPLETE  = "https://raw.githubusercontent.com/openshift/managed-notifications/refs/heads/master/osd/clustertransfer_completed.json"
+	SL_PULLSEC_ROTATED    = "https://raw.githubusercontent.com/openshift/managed-notifications/refs/heads/master/osd/pull_secret_rotated.json"
 )
 
 // transferOwnerOptions defines the struct for running transferOwner command
 type transferOwnerOptions struct {
-	output       string
-	clusterID    string
-	oldOwnerName string
-	newOwnerName string
-	reason       string
-	dryrun       bool
-	hypershift   bool
-	cluster      *cmv1.Cluster
+	clusterID        string
+	oldOwnerName     string
+	newOwnerName     string
+	reason           string
+	dryrun           bool
+	hypershift       bool
+	doPullSecretOnly bool
+	opDescription    string
+	cluster          *cmv1.Cluster
 
 	genericclioptions.IOStreams
 	GlobalOptions *globalflags.GlobalOptions
 }
 
+var red *color.Color
+var blue *color.Color
+var green *color.Color
+
+const transferOwnerDocs = `See documentation prior to executing:
+https://github.com/openshift/ops-sop/blob/master/hypershift/knowledge_base/howto/replace-pull-secret.md
+https://github.com/openshift/ops-sop/blob/master/v4/howto/transfer_cluster_ownership.md
+https://access.redhat.com/solutions/6126691
+`
+
+const transferOwnerCmdExample = `
+  # Transfer ownership
+  osdctl cluster transfer-owner --new-owner "$NEW_ACCOUNT" --old-owner "$OLD_ACCOUNT" --cluster-id 1kfmyclusteristhebesteverp8m --reason "transfer ownership per jira-id"
+`
+
 func newCmdTransferOwner(streams genericclioptions.IOStreams, globalOpts *globalflags.GlobalOptions) *cobra.Command {
 	ops := newTransferOwnerOptions(streams, globalOpts)
 	transferOwnerCmd := &cobra.Command{
 		Use:               "transfer-owner",
 		Short:             "Transfer cluster ownership to a new user (to be done by Region Lead)",
+		Long:              fmt.Sprintf("Transfer cluster ownership to a new user (to be done by Region Lead)\n\n%s\n", transferOwnerDocs),
 		Args:              cobra.NoArgs,
+		Example:           transferOwnerCmdExample,
 		DisableAutoGenTag: true,
+		PreRun:            func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(ops.preRun()) },
 		Run: func(cmd *cobra.Command, args []string) {
 			cmdutil.CheckErr(ops.run())
 		},
@@ -73,12 +102,14 @@ func newCmdTransferOwner(streams genericclioptions.IOStreams, globalOpts *global
 	transferOwnerCmd.Flags().StringVar(&ops.oldOwnerName, "old-owner", ops.oldOwnerName, "The old owner's username to transfer the cluster from")
 	transferOwnerCmd.Flags().StringVar(&ops.newOwnerName, "new-owner", ops.newOwnerName, "The new owner's username to transfer the cluster to")
 	transferOwnerCmd.Flags().BoolVarP(&ops.dryrun, "dry-run", "d", false, "Dry-run - show all changes but do not apply them")
+	transferOwnerCmd.Flags().BoolVar(&ops.doPullSecretOnly, "pull-secret-only", false, "Update cluster pull secret from current OCM AccessToken data without ownership transfer")
 	transferOwnerCmd.Flags().StringVar(&ops.reason, "reason", "", "The reason for this command, which requires elevation, to be run (usualy an OHSS or PD ticket)")
 
 	_ = transferOwnerCmd.MarkFlagRequired("cluster-id")
 	_ = transferOwnerCmd.MarkFlagRequired("old-owner")
 	_ = transferOwnerCmd.MarkFlagRequired("new-owner")
 	_ = transferOwnerCmd.MarkFlagRequired("reason")
+	_ = transferOwnerCmd.Flags().MarkHidden("pull-secret-only")
 
 	return transferOwnerCmd
 }
@@ -99,25 +130,60 @@ type serviceLogParameters struct {
 	IsExternalOrgTransfer bool
 }
 
-func generateInternalServiceLog(params serviceLogParameters) servicelog.PostCmdOptions {
-	return servicelog.PostCmdOptions{
+func (o *transferOwnerOptions) preRun() error {
+	// Initialize the color formats...
+	red = color.New(color.FgHiRed, color.BgBlack)
+	green = color.New(color.FgHiGreen, color.BgBlack)
+	blue = color.New(color.FgHiBlue, color.BgBlack)
+	if o.doPullSecretOnly {
+		o.opDescription = "update pull-secret"
+	} else {
+		o.opDescription = "transfer ownership"
+	}
+	return nil
+}
+
+func generateInternalServiceLog(params serviceLogParameters, dryRun bool) servicelog.PostCmdOptions {
+	sl := servicelog.PostCmdOptions{
 		ClusterId: params.ClusterID,
 		TemplateParams: []string{
 			"MESSAGE=" + fmt.Sprintf("From user '%s' in Red Hat account %s => user '%s' in Red Hat account %s.", params.OldOwnerName, params.OldOwnerID, params.NewOwnerName, params.NewOwnerID),
 		},
 		InternalOnly: true,
 	}
+	sl.SetDryRun(dryRun)
+	return sl
 }
 
-func generateServiceLog(params serviceLogParameters, template string) servicelog.PostCmdOptions {
-	return servicelog.PostCmdOptions{
+func generateServiceLog(params serviceLogParameters, template string, dryRun bool) servicelog.PostCmdOptions {
+	sl := servicelog.PostCmdOptions{
 		Template:  template,
 		ClusterId: params.ClusterID,
 	}
+	sl.SetDryRun(dryRun)
+	return sl
+}
+
+// Wrapper providing additional info from SOP procedure
+func getOcmAccountWithHelpMessage(ocm *sdk.Connection, userName string) (*amv1.Account, error) {
+	account, err := utils.GetAccount(ocm, userName)
+	if err != nil {
+		red.Fprintf(os.Stderr, "Could not get account for:'%s'. Error:'%s'\n", userName, err)
+		red.Fprintf(os.Stderr, "If 0 accounts were found, this may indicate '%s' has never logged on http://console.redhat.com/ .\n", userName)
+		red.Fprintf(os.Stderr, "Please confirm '%s' is correct. If correct, please ask this user to login at least once on http://console.redhat.com/.\n", userName)
+		red.Fprint(os.Stderr, "Note: It can take some time (up to ~1h) after the first login before the OCM database is updated.\n")
+		return account, fmt.Errorf("could not get current owner's account, err:'%w'", err)
+	}
+	return account, err
+}
 
 func updatePullSecret(conn *sdk.Connection, kubeCli client.Client, clientset *kubernetes.Clientset, clusterID string, pullsecret []byte) error {
 	currentEnv := utils.GetCurrentOCMEnv(conn)
+	if currentEnv == "stage" {
+		// stage hive cluster namespaces are prefixed 'uhc-staging' although the ocm url is currently using 'stage'
+		// This may be better as a loop over all the namespaces looking for a clusterid match instead?
+		currentEnv = "staging"
+	}
 	secretName := "pull"
 	hiveNamespace := "uhc-" + currentEnv + "-" + clusterID
@@ -127,7 +193,7 @@ func updatePullSecret(conn *sdk.Connection, kubeCli client.Client, clientset *ku
 	}
 
 	if len(clusterDeployments.Items) == 0 {
-		return fmt.Errorf("failed to retreive cluster deployments")
+		return fmt.Errorf("error, found '0' cluster deployments in hive namespace:'%s'", hiveNamespace)
 	}
 
 	cdName := clusterDeployments.Items[0].ObjectMeta.Name
@@ -163,10 +229,11 @@ func updatePullSecret(conn *sdk.Connection, kubeCli client.Client, clientset *ku
 func awaitPullSecretSyncSet(hiveNamespace string, cdName string, kubeCli client.Client) error {
 	ctx := context.TODO()
 
+	const ssName string = "pull-secret-replacement"
 	syncSet := &hiveapiv1.SyncSet{
 		ObjectMeta: metav1.ObjectMeta{
-			Name:      "pull-secret-replacement",
+			Name:      ssName,
 			Namespace: hiveNamespace,
 		},
 		Spec: hiveapiv1.SyncSetSpec{
@@ -237,19 +304,21 @@ func awaitPullSecretSyncSet(hiveNamespace string, cdName string, kubeCli client.
 		time.Sleep(time.Second * 5)
 	}
 	if !isSSSynced {
-		return fmt.Errorf("syncset failed to sync. Please verify syncset is still there and manually delete syncset ")
+		return fmt.Errorf("syncset:'%s/%s' failed to sync. Please verify syncset is still there and manually delete syncset ", hiveNamespace, ssName)
 	}
 
 	// Clean up the SS on hive
 	err = kubeCli.Delete(ctx, syncSet)
 	if err != nil {
-		return fmt.Errorf("failed to delete SyncSet: %w", err)
+		return fmt.Errorf("failed to delete SyncSet:'%s/%s', err: %w", hiveNamespace, ssName, err)
 	}
 
 	return nil
 }
 
 func rolloutPods(clientset *kubernetes.Clientset, namespace, selector string) error {
+	fmt.Printf("Attempting to refresh pods, namespace:'%s', selector:'%s'\n", namespace, selector)
 	// Delete pods with the specified label selector in the specified namespace.
 	pods, err := clientset.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{
 		LabelSelector: selector,
@@ -270,46 +339,92 @@ func rolloutPods(clientset *kubernetes.Clientset, namespace, selector string) er
 	return nil
 }
 
-func verifyClusterPullSecret(clientset *kubernetes.Clientset, expectedPullSecret string) error {
+func comparePullSecretAuths(pullSecret *corev1.Secret, expectedAuths map[string]*amv1.AccessTokenAuth) error {
+	var err error
+
+	blue.Println("\nComparing pull-secret to expected auth sections...")
+	for akey, auth := range expectedAuths {
+		// Find the matching auth entry for this registry name in the cluster pull_secret data...
+		psTokenAuth, authErr := getPullSecretTokenAuth(akey, pullSecret)
+		if authErr != nil {
+			err = errors.Join(err, fmt.Errorf("failed to fetch expected auth['%s'] from cluster pull-secret, err:'%s'", akey, authErr))
+			continue
+		}
+		if psTokenAuth == nil {
+			err = errors.Join(err, fmt.Errorf("failed to fetch expected auth['%s'] from cluster pull-secret, err: (nil authToken)", akey))
+			continue
+		}
+		if auth.Auth() != psTokenAuth.Auth() {
+			err = errors.Join(err, fmt.Errorf("expected auth['%s'] does not match authToken found in cluster pull-secret", akey))
+		} else {
+			green.Printf("Auth '%s' - tokens match\n", akey)
+		}
+		if auth.Email() != psTokenAuth.Email() {
+			err = errors.Join(err, fmt.Errorf("expected auth['%s'] does not match email found in cluster pull-secret", akey))
+		} else {
+			green.Printf("Auth '%s' - emails match\n", akey)
+		}
+	}
+	return err
+}
+
+func verifyClusterPullSecret(clientset *kubernetes.Clientset, expectedPullSecret string, expectedAuths map[string]*amv1.AccessTokenAuth) error {
 	// Retrieve the pull secret from the "openshift-config" namespace
 	pullSecret, err := clientset.CoreV1().Secrets("openshift-config").Get(context.TODO(), "pull-secret", metav1.GetOptions{})
 	if err != nil {
 		return fmt.Errorf("failed to get pull secret: %w", err)
 	}
-
-	// Print the actual pull secret data
 	pullSecretData, ok := pullSecret.Data[".dockerconfigjson"]
 	if !ok {
 		return fmt.Errorf("pull secret data not found in the secret")
 	}
-
-	fmt.Println("Actual Cluster Pull Secret:")
-	fmt.Println(string(pullSecretData))
-
-	// Print the expected pull secret
-	fmt.Println("\nExpected Cluster Pull Secret:")
-	fmt.Println(expectedPullSecret)
-
-	// Ask the user to confirm if the actual pull secret matches their expectation
-	reader := bufio.NewReader(os.Stdin)
-	fmt.Print("\nDoes the actual pull secret match your expectation? (yes/no): ")
-	response, err := reader.ReadString('\n')
+	err = comparePullSecretAuths(pullSecret, expectedAuths)
 	if err != nil {
-		return fmt.Errorf("failed to read user input: %w", err)
-	}
+		red.Printf("\nFound mismatching auth values during compare. Please review:\n%s", err)
+		fmt.Print("Would you like to continue? ")
+		if !utils.ConfirmPrompt() {
+			return fmt.Errorf("operation aborted by the user")
+		}
+	} else {
+		green.Println("\nComparison shows subset of Auths from OCM AuthToken have matching tokens + emails in cluster pull-secret. PASS")
+	}
+	// This step was in the original utility so leaving the option to print data to terminal here,
+	// but making it optional and prompting the user instead. The new programmatic
+	// comparisons per comparePullSecretAuths() may negate the need for a visual inspection in most cases...
+	red.Print("\nWARNING: This will print sensitive data to the terminal!\n")
+	fmt.Print("Would you like to print pull secret content to screen for additional visual comparison?\n")
+	fmt.Print("Choose 'N' to skip, 'Y' to display secret. ")
+	if utils.ConfirmPrompt() {
+		// Print the actual pull secret data
+		blue.Println("Actual Cluster Pull Secret:")
+		fmt.Println(string(pullSecretData))
+
+		// Print the expected pull secret
+		blue.Println("\nExpected Auths from OCM AccessToken expected to be present in Pull Secret (note this can be a subset):")
+		fmt.Println(expectedPullSecret)
+
+		// Ask the user to confirm if the actual pull secret matches their expectation
+		reader := bufio.NewReader(os.Stdin)
+		fmt.Print("\nDoes the actual pull secret match your expectation? (yes/no): ")
+		response, err := reader.ReadString('\n')
+		if err != nil {
+			return fmt.Errorf("failed to read user input: %w", err)
+		}
+		response = strings.ToLower(strings.TrimSpace(response))
+		if response != "yes" {
+			return fmt.Errorf("operation aborted by the user")
+		}
 
-	response = strings.ToLower(strings.TrimSpace(response))
-	if response != "yes" {
-		return fmt.Errorf("operation aborted by the user")
+		green.Println("Pull secret verification (by user) successful.")
+	} else {
+		fmt.Println("(Skipping display)")
 	}
-
-	fmt.Println("Pull secret verification successful.")
-
 	return nil
 }
 
 func updateManifestWork(conn *sdk.Connection, kubeCli client.Client, clusterID, mgmtClusterName string, pullsecret []byte) error {
-
+	fmt.Printf("updateManifestWork begin...\n")
 	if err := workv1.AddToScheme(kubeCli.Scheme()); err != nil {
 		return fmt.Errorf("failed to add scheme: %w", err)
 	}
@@ -325,6 +440,7 @@ func updateManifestWork(conn *sdk.Connection, kubeCli client.Client, clusterID,
 	secretNamePrefix := hostedCluster.DomainPrefix() + "-pull"
 
 	// Generate a random new secret name based on the existing pull secret name
+	// A new secret 'name' is used here to trigger the update(?)
 	randomSuffix := func(chars string, length int) string {
 		rand.Seed(time.Now().UnixNano())
 		result := make([]byte, length)
@@ -335,6 +451,7 @@ func updateManifestWork(conn *sdk.Connection, kubeCli client.Client, clusterID,
 	}
 	newSecretName := secretNamePrefix + "-" + randomSuffix("0123456789abcdef", 6)
 
+	fmt.Printf("get() ManifestWork...\n")
 	manifestWork := &workv1.ManifestWork{}
 	err = kubeCli.Get(context.TODO(), types.NamespacedName{Name: manifestWorkName, Namespace: manifestWorkNamespace}, manifestWork)
 	if err != nil {
@@ -394,13 +511,15 @@ func updateManifestWork(conn *sdk.Connection, kubeCli client.Client, clusterID,
 		}
 	}
 
+	fmt.Printf("update() ManifestWork...\n")
 	err = kubeCli.Update(context.TODO(), manifestWork, &client.UpdateOptions{})
 	if err != nil {
 		return fmt.Errorf("cannot update the pull-secret within manifestwork: %w", err)
 	}
 
 	// The secret will be synced to the management cluster and guest cluster in a few seconds, wait here
-	fmt.Println("sleep 60 seconds here to make sure secret gets synced on guest cluster")
+	fmt.Println("Manifest work updated.")
+	fmt.Println("Sleeping 60 seconds here to allow secret to be synced on guest cluster")
 	time.Sleep(time.Second * 60)
 
 	return nil
@@ -443,6 +562,9 @@ func buildNewSecret(oldpullsecret, newpullsecret []byte) ([]byte, error) {
 
 func (o *transferOwnerOptions) run() error {
 	// Initiate Connections First
+	var err error
+	// To avoid warnings/backtrace, if k8s controller-runtime logger is not yet set, do it now...
+	log.SetLogger(zap.New(zap.WriteTo(os.Stderr)))
 	// Create an OCM client to talk to the cluster API
 	// the user has to be logged in (e.g. 'ocm login')
 	ocm, err := utils.CreateConnection()
@@ -457,8 +579,41 @@ func (o *transferOwnerOptions) run() error {
 
 	// Gather all required data
 	cluster, err := utils.GetClusterAnyStatus(ocm, o.clusterID)
+	if err != nil {
+		return err
+	}
 	o.cluster = cluster
 	o.clusterID = cluster.ID()
+	var userName string
+	var subscription *amv1.Subscription = nil
+	var oldOwnerAccount *amv1.Account = nil
+	var ok bool
+	if o.hypershift {
+		// Hypershift transfers carry additional risks and should not be performed by
+		// this command at this time.
+		err = fmt.Errorf("hypershift clusters are not currently supported with this command")
+		fmt.Printf("Warning %s\n%s\n", err, transferOwnerDocs)
+		return err
+	}
+	if o.doPullSecretOnly {
+		// This is updating the pull secret and not an ownership transfer.
+		// Use existing subscription, account, and userName value...
+		subscription, err = utils.GetSubscription(ocm, o.clusterID)
+		if err != nil {
+			return fmt.Errorf("failed to get subscription info for cluster:'%s', err: '%v'", o.clusterID, err)
+		}
+		oldOwnerAccount, err = utils.GetAccount(ocm, subscription.Creator().ID())
+		if err != nil {
+			return fmt.Errorf("failed to get account info from subscription, err:'%v'", err)
+		}
+		userName = oldOwnerAccount.Username()
+		fmt.Printf("Username:'%s'\n", userName)
+	} else {
+		oldOwnerAccount, err = getOcmAccountWithHelpMessage(ocm, o.oldOwnerName)
+		if err != nil {
+			return err
+		}
+	}
 
 	var mgmtCluster, svcCluster, hiveCluster, masterCluster *cmv1.Cluster
 
@@ -469,7 +624,7 @@
 
 	// Find and setup all resources that are needed
 	if o.hypershift {
-		fmt.Println("Given cluster is HCP, start to proceed the HCP owner transfer")
+		fmt.Printf("Given cluster is HCP, start to proceed for an HCP '%s'\n", o.opDescription)
 		mgmtCluster, err = utils.GetManagementCluster(o.clusterID)
 		svcCluster, err = utils.GetServiceCluster(o.clusterID)
 		if err != nil {
@@ -477,7 +632,7 @@
 		}
 		masterCluster = svcCluster
 	} else {
-		fmt.Println("Given cluster is OSD/ROSA classic, start to proceed the classic owner transfer")
+		fmt.Printf("Given cluster is OSD/ROSA classic, start to proceed for a classic '%s'\n", o.opDescription)
 		hiveCluster, err = utils.GetHiveCluster(o.clusterID)
 		if err != nil {
 			return err
 		}
@@ -487,11 +642,14 @@ func (o *transferOwnerOptions) run() error {
 
 	elevationReasons := []string{
 		o.reason,
-		fmt.Sprintf("Updating pull secret using osdctl to tranfert owner to %s", o.newOwnerName),
 	}
-
+	if o.doPullSecretOnly {
+		elevationReasons = append(elevationReasons, "Updating pull secret using osdctl")
+	} else {
+		elevationReasons = append(elevationReasons, fmt.Sprintf("Updating pull secret using osdctl to transfer owner to %s", o.newOwnerName))
+	}
 	// Gather all required information
-	fmt.Println("Gathering all required information for the cluster transfer...")
+	fmt.Printf("Gathering all required information for the cluster '%s'...\n", o.opDescription)
 	cluster, err = utils.GetCluster(ocm, o.clusterID)
 	if err != nil {
 		return fmt.Errorf("failed to get cluster information for cluster with ID %s: %w", o.clusterID, err)
@@ -502,19 +660,22 @@ func (o *transferOwnerOptions) run() error {
 		return fmt.Errorf("cluster has no external id")
 	}
 
-	subscription, err := utils.GetSubscription(ocm, o.clusterID)
-	if err != nil {
-		return fmt.Errorf("could not get subscription: %w", err)
+	if subscription == nil {
+		subscription, err = utils.GetSubscription(ocm, o.clusterID)
+		if err != nil {
+			return fmt.Errorf("could not get subscription: %w", err)
+		}
 	}
 
 	subscriptionID, ok := subscription.GetID()
 	if !ok {
 		return fmt.Errorf("Could not get subscription id")
 	}
-
-	oldOwnerAccount, err := utils.GetAccount(ocm, o.oldOwnerName)
-	if !ok {
-		return fmt.Errorf("could not get current owner's account, ask the user to log into http://console.redhat.com/ and try again: %w", err)
+	if oldOwnerAccount == nil {
+		oldOwnerAccount, err = utils.GetAccount(ocm, o.oldOwnerName)
+		if err != nil {
+			return fmt.Errorf("could not get current owner's account, ask the user to log into http://console.redhat.com/ and try again: %w", err)
+		}
 	}
 
 	oldOwnerOrganization, ok := oldOwnerAccount.GetOrganization()
@@ -527,20 +688,39 @@ func (o *transferOwnerOptions) run() error {
 		return fmt.Errorf("current owner's organization has no ID")
 	}
 
-	newOwnerAccount, err := utils.GetAccount(ocm, o.newOwnerName)
-	if err != nil {
-		return fmt.Errorf("could not get new owner's account, ask the user to log into http://console.redhat.com/ and try again: %w", err)
-	}
-
-	newOwnerOrganization, ok := newOwnerAccount.GetOrganization()
-	if !ok {
-		return fmt.Errorf("new owner has no organization")
+	var newOwnerAccount *amv1.Account = nil
+	var newOwnerOrganization *amv1.Organization = nil
+	var oldUsername string
+	if o.doPullSecretOnly {
+		// This is not an ownership transfer, just a pull-secret update;
+		// new info == old info.
+		// TODO: Can likely skip most of the next set of checks, but why not?
+		oldUsername, ok = oldOwnerAccount.GetUsername()
+		if !ok {
+			fmt.Printf("old username not found?\n")
+		}
+		fmt.Printf("Using old account values. OwnerAccount:'%s'\n", oldUsername)
+		newOwnerAccount = oldOwnerAccount
+		newOwnerOrganization = oldOwnerOrganization
+	} else {
+		newOwnerAccount, err = utils.GetAccount(ocm, o.newOwnerName)
+		if err != nil {
+			return fmt.Errorf("could not get new owner's account, ask the user to log into http://console.redhat.com/ and try again: %w", err)
+		}
+		newOwnerOrganization, ok = newOwnerAccount.GetOrganization()
+		if !ok {
+			return fmt.Errorf("new account has no organization")
+		}
 	}
 
 	newOwnerOrganizationId, ok := newOwnerOrganization.GetID()
 	if !ok {
 		return fmt.Errorf("new owner's organization has no ID")
 	}
+	fmt.Printf("old orgID:'%s', new orgID:'%s'\n", oldOrganizationId, newOwnerOrganizationId)
+	if o.doPullSecretOnly && newOwnerOrganizationId != oldOrganizationId {
+		return fmt.Errorf("new org != old org. Ownership transfer not expected with pull-secret-only flag")
+	}
 
 	newOwnerAccountID, ok := newOwnerAccount.GetID()
 	if !ok {
@@ -591,12 +771,14 @@ func (o *transferOwnerOptions) run() error {
 	if !ok {
 		return fmt.Errorf("cannot get new org ebs id")
 	}
-
-	// Confirm if the ownership transfer looks correct
-	fmt.Printf("Transfer cluster: \t\t'%v' (%v)\n", externalClusterID, cluster.Name())
-	fmt.Printf("from user \t\t\t'%v' ('%v') to '%v ('%v')'\n", oldOwnerAccount.ID(), oldOwnerUsername, newOwnerAccountID, newOwnerUsername)
-	if !utils.ConfirmPrompt() {
-		return nil
+	if !o.doPullSecretOnly {
+		// Confirm if the ownership transfer looks correct
+		fmt.Printf("\nTransfer cluster: \t\t'%v' (%v)\n", externalClusterID, cluster.Name())
+		fmt.Printf("from user \t\t\t'%v' ('%v') to '%v' ('%v')\n", oldOwnerAccount.ID(), oldOwnerUsername, newOwnerAccountID, newOwnerUsername)
+		fmt.Print("Is the above correct? Proceed with transfer? ")
+		if !utils.ConfirmPrompt() {
+			return nil
+		}
 	}
 
 	orgChanged := oldOrganizationId != newOwnerOrganizationId
@@ -611,22 +793,44 @@ func (o *transferOwnerOptions) run() error {
 		IsExternalOrgTransfer: orgChanged,
 	}
 
-	// Send a SL saying we're about to start
-	fmt.Println("Notify the customer before ownership transfer commences. Sending service log.")
-	postCmd := generateServiceLog(slParams, SL_TRANSFER_INITIATED)
-	if err := postCmd.Run(); err != nil {
-		fmt.Println("Failed to POST customer service log. Please manually send a service log to notify the customer before ownership transfer commences:")
-		fmt.Printf("osdctl servicelog post %v -t %v -p %v\n",
-			o.clusterID, SL_TRANSFER_INITIATED, strings.Join(postCmd.TemplateParams, " -p "))
+	var postCmd servicelog.PostCmdOptions
+	// TODO: If only updating the pull-secret, and not transferring ownership,
+	// should we send a SL both before and after the rotate operation?
+	// Currently only sending one after the pull-secret update is completed
+	// when not also transferring ownership.
+	if !o.doPullSecretOnly {
+		// Send a SL saying we're about to start ownership transfer
+		fmt.Println("Notify the customer before ownership transfer commences. Sending service log.")
+		postCmd = generateServiceLog(slParams, SL_TRANSFER_INITIATED, o.dryrun)
+		if err := postCmd.Run(); err != nil {
+			fmt.Println("Failed to POST customer service log. Please manually send a service log to notify the customer before ownership transfer commences:")
+			fmt.Printf("osdctl servicelog post %v -t %v -p %v\n",
+				o.clusterID, SL_TRANSFER_INITIATED, strings.Join(postCmd.TemplateParams, " -p "))
+		}
 	}
 
 	// Send internal SL to cluster with additional details in case we
 	// need them later. This prevents leaking PII to customers.
-	postCmd = generateInternalServiceLog(slParams)
-	fmt.Println("Internal SL Being Sent")
-	if err := postCmd.Run(); err != nil {
-		fmt.Println("Failed to POST internal service log. Please manually send a service log to persist details of the customer transfer before proceeding:")
-		fmt.Println(fmt.Sprintf("osdctl servicelog post -i -p MESSAGE=\"From user '%s' in Red Hat account %s => user '%s' in Red Hat account %s.\" %s", slParams.OldOwnerName, slParams.OldOwnerID, slParams.NewOwnerName, slParams.NewOwnerID, slParams.ClusterID))
+	fmt.Print("\nPlease review the following 'Internal' ServiceLog. (Choose 'Y' to send, or 'N' to skip sending this SL...)\n")
+	if o.doPullSecretOnly {
+		postCmd = servicelog.PostCmdOptions{
+			ClusterId: slParams.ClusterID,
+			TemplateParams: []string{
+				"MESSAGE=" + fmt.Sprintf("Pull-secret update initiated. UserName:'%s', OwnerID:'%s'", slParams.OldOwnerName, slParams.OldOwnerID),
+			},
+			InternalOnly: true,
+		}
+		postCmd.SetDryRun(o.dryrun)
+		if err := postCmd.Run(); err != nil {
+			fmt.Println("Failed to POST internal service log. Please manually send a service log to persist details of the customer transfer before proceeding:")
+			fmt.Printf("osdctl servicelog post -i -p MESSAGE=\"Pull-secret update. UserName:'%s', OwnerID:'%s'.\" %s\n", slParams.OldOwnerName, slParams.OldOwnerID, slParams.ClusterID)
+		}
+	} else {
+		postCmd = generateInternalServiceLog(slParams, o.dryrun)
+		if err := postCmd.Run(); err != nil {
+			fmt.Println("Failed to POST internal service log. Please manually send a service log to persist details of the customer transfer before proceeding:")
+			fmt.Printf("osdctl servicelog post -i -p MESSAGE=\"From user '%s' in Red Hat account %s => user '%s' in Red Hat account %s.\" %s\n", slParams.OldOwnerName, slParams.OldOwnerID, slParams.NewOwnerName, slParams.NewOwnerID, slParams.ClusterID)
		}
 	}
 
 	masterKubeCli, _, masterKubeClientSet, err := common.GetKubeConfigAndClient(masterCluster.ID(), elevationReasons...)
@@ -634,10 +838,31 @@
 		return fmt.Errorf("failed to retrieve Kubernetes configuration and client for Hive cluster ID %s: %w", masterCluster.ID(), err)
 	}
 
-	// Fetch the pull secret with the given new username
-	response, err := ocm.AccountsMgmt().V1().AccessToken().Post().Impersonate(newOwnerUsername).Parameter("body", nil).Send()
+	// Get account running this command to compare against cluster's account for
+	// impersonation purposes.
+	currentAccountResp, err := ocm.AccountsMgmt().V1().CurrentAccount().Get().Send()
+	var currentOCMAccount *amv1.Account = nil
+	if err != nil {
+		// Ignore this error and continue with an attempt to use 'impersonate' instead...
+		fmt.Fprintf(os.Stderr, "Failed to fetch currentAccount info, err:'%v'\n", err)
+		currentAccountResp = nil
+	} else {
+		currentOCMAccount = currentAccountResp.Body()
+	}
+
+	// Fetch the current Access Token for pull secret with the given new username from OCM
+	var response *amv1.AccessTokenPostResponse = nil
+	if currentOCMAccount == nil || currentOCMAccount.Username() != newOwnerUsername {
+		// This account is not owned by the OCM account running this command, so impersonate...
+		// Impersonate requires region-lead permissions at this time.
+		response, err = ocm.AccountsMgmt().V1().AccessToken().Post().Impersonate(newOwnerUsername).Parameter("body", nil).Send()
+	} else {
+		// This account is owned by the OCM account running this command, no need to impersonate.
+		// This allows non-region leads to test this utility against their own test clusters.
+		response, err = ocm.AccountsMgmt().V1().AccessToken().Post().Send()
+	}
 	if err != nil {
-		return fmt.Errorf("Can't send request: %w", err)
+		return fmt.Errorf("failed to fetch OCM AccessToken: %w", err)
 	}
 
 	auths, ok := response.Body().GetAuths()
@@ -659,53 +884,126 @@
 		return fmt.Errorf("failed to marshal pull secret data: %w", err)
 	}
 
-	// Print the pull secret
-	fmt.Println("Pull Secret:")
-	fmt.Println(string(pullSecret))
-
-	// Ask the user if they would like to continue
-	var continueConfirmation string
-	fmt.Print("Do you want to continue? (yes/no): ")
-	_, err = fmt.Scanln(&continueConfirmation)
-	if err != nil {
-		return fmt.Errorf("failed to read user input: %w", err)
-	}
+	// This step was in the original utility so leaving the option to print data to terminal here,
+	// but making it optional and prompting the user instead. The new programmatic
+	// comparisons per comparePullSecretAuths() may negate the need for a visual inspection in most cases...
+	red.Print("\nWARNING: This will print sensitive data to the terminal!\n")
+	fmt.Print("Would you like to print pull secret content to screen for visual review?\nDisplay pullsecret data (choose 'N' to skip, 'Y' to display)? ")
+	if utils.ConfirmPrompt() {
+		// Attempt to pretty print the json for easier initial user review...
+		prettySecret, err := json.MarshalIndent(map[string]map[string]map[string]string{
+			"auths": authsMap,
+		}, "", " ")
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Error marshalling data for pretty print. Err:'%v'", err)
+		} else {
+			blue.Println("Pull Secret data (indented)...")
+			blue.Printf("\n%s\n", prettySecret)
+		}
 
-	// Check the user's response
-	if continueConfirmation != "yes" {
-		return fmt.Errorf("operation aborted by the user")
-	}
+		// Print the pull secret in its actual form for the user to confirm (ie no go, json, formatting errors, etc)
+		green.Print("\nPlease review Pull Secret data to be used for update (after formatting):\n")
+		fmt.Println(string(pullSecret))
 
-	if o.hypershift {
-		err = updateManifestWork(ocm, masterKubeCli, o.clusterID, mgmtCluster.Name(), pullSecret)
+		// Ask the user if they would like to continue
+		var continueConfirmation string
+		fmt.Print("\nDo you want to continue? (yes/no): ")
+		_, err = fmt.Scanln(&continueConfirmation)
 		if err != nil {
-			return fmt.Errorf("failed to update pull secret for service cluster with ID %s: %w", o.clusterID, err)
+			return fmt.Errorf("failed to read user input: %w", err)
+		}
+
+		// Check the user's response
+		if continueConfirmation != "yes" {
+			return fmt.Errorf("operation aborted by the user")
 		}
 	} else {
-		err = updatePullSecret(ocm, masterKubeCli, masterKubeClientSet, o.clusterID, pullSecret)
-		if err != nil {
-			return fmt.Errorf("failed to update pull secret for Hive cluster with ID %s: %w", o.clusterID, err)
+		fmt.Println("(Skipping display)")
+	}
+	// Don't update actual cluster artifacts if this is a dry-run...
+	if o.dryrun {
+		fmt.Println("This is a 'dryrun', skipping the actual update of the cluster's pull secret.")
+	} else {
+		if o.hypershift {
+			err = updateManifestWork(ocm, masterKubeCli, o.clusterID, mgmtCluster.Name(), pullSecret)
+			if err != nil {
+				return fmt.Errorf("failed to update pull secret for service cluster with ID %s: %w", o.clusterID, err)
+			}
+		} else {
+			err = updatePullSecret(ocm, masterKubeCli, masterKubeClientSet, o.cluster.ID(), pullSecret)
+			if err != nil {
+				return fmt.Errorf("failed to update pull secret for Hive cluster with ID %s: %w", o.clusterID, err)
+			}
 		}
 	}
 
+	fmt.Println("Create cluster kubecli...")
 	_, _, targetClientSet, err := common.GetKubeConfigAndClient(o.clusterID, elevationReasons...)
 	if err != nil {
 		return fmt.Errorf("failed to retrieve Kubernetes configuration and client for cluster with ID %s: %w", o.clusterID, err)
 	}
+	fmt.Println("Cluster kubecli created")
 
 	// Rollout the telemeterClient pod for non HCP clusters
 	if !o.hypershift {
-		err = rolloutPods(targetClientSet, "openshift-monitoring", "app.kubernetes.io/name=telemeter-client")
+		if o.dryrun {
+			fmt.Println("This is a 'dryrun', not rolling openshift-monitoring telemeter-client pods.")
+		} else {
+			err = rolloutPods(targetClientSet, "openshift-monitoring", "app.kubernetes.io/name=telemeter-client")
+			if err != nil {
+				return fmt.Errorf("failed to roll out Telemeter Client pods in namespace 'openshift-monitoring' with label selector 'app.kubernetes.io/name=telemeter-client': %w", err)
+			}
+		}
+	}
+
+	// During a dry-run the PS has not yet been updated and can provide misleading errors
+	if o.dryrun {
+		fmt.Println("This is a 'dryrun', skipping the verification of cluster's pull secret.")
+	} else {
+		err = verifyClusterPullSecret(targetClientSet, string(pullSecret), auths)
 		if err != nil {
-			return fmt.Errorf("failed to roll out Telemeter Client pods in namespace 'openshift-monitoring' with label selector 'app.kubernetes.io/name=telemeter-client': %w", err)
+			return fmt.Errorf("error verifying cluster pull secret: %w", err)
 		}
 	}
 
-	err = verifyClusterPullSecret(targetClientSet, string(pullSecret))
-	if err != nil {
-		return fmt.Errorf("error verifying cluster pull secret: %w", err)
+	if o.doPullSecretOnly {
+		// Rollout the ocmAgent pods for non HCP clusters
+		if !o.hypershift {
+			if o.dryrun {
+				fmt.Print("This is a 'dryrun', not rolling openshift-ocm-agent-operator ocm-agent pods.\n")
+			} else {
+				err = rolloutPods(targetClientSet, "openshift-ocm-agent-operator", "app=ocm-agent")
+				if err != nil {
+					return fmt.Errorf("failed to roll out OCM Agent pods in namespace 'openshift-ocm-agent-operator' with label selector 'app=ocm-agent': %w", err)
+				}
+			}
+		}
+
+		// User has chosen to update pull secret w/o ownership transfer.
+		// Send SL to notify customer this is completed, then return the command.
+		fmt.Println("Notify the customer the pull-secret update is completed. Sending service log.")
+		postCmd = servicelog.PostCmdOptions{
+			Template:       SL_PULLSEC_ROTATED,
+			ClusterId:      o.clusterID,
+			TemplateParams: []string{fmt.Sprintf("ACCOUNT=%s", oldOwnerAccountID)},
+		}
+		postCmd.SetDryRun(o.dryrun)
+		if err := postCmd.Run(); err != nil {
+			fmt.Println("Failed to POST service log. Please manually send a service log to notify the customer the pull-secret update completed:")
+			fmt.Printf("osdctl servicelog post %v -t %v -p %v\n",
+				o.clusterID, SL_PULLSEC_ROTATED, strings.Join(postCmd.TemplateParams, " -p "))
+		}
+		if o.dryrun {
+			fmt.Print("This is a dry run, pull secret not updated.\n")
+			return nil
+		}
+		fmt.Printf("Pull secret update complete, exiting successfully\n")
+		return nil
 	}
 
+	// Transfer ownership specific operations...
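+	// Everything below this point mutates OCM ownership records: patching the
+	// subscription's org/creator, swapping role bindings, and re-registering
+	// the cluster with CS when the organization changes.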
+	ok = validateOldOwner(oldOrganizationId, subscription, oldOwnerAccount)
 	if !ok {
 		fmt.Print("can't validate this is old owners cluster, this could be because of a previously failed run\n")
@@ -755,7 +1053,7 @@ func (o *transferOwnerOptions) run() error {
 
 		response, err := subscriptionClient.Update().Body(subscriptionOrgPatch).Send()
 		if err != nil || response.Status() != 200 {
-			return fmt.Errorf("request failed with status: %d, '%w'", response.Status(), err)
+			return fmt.Errorf("update Subscription request to patch org failed with status: %d, err:'%w'", response.Status(), err)
 		}
 		fmt.Printf("Patched organization on subscription\n")
 	}
@@ -764,7 +1062,16 @@ func (o *transferOwnerOptions) run() error {
 
 	patchRes, err := subscriptionCreatorPatchRequest.Send()
 	if err != nil || patchRes.Status() != 200 {
-		return fmt.Errorf("request failed with status: %d, '%w'", patchRes.Status(), err)
+		// err var is not always set to something meaningful here.
+		// Instead the response body usually contains the err info...
+		red.Fprintf(os.Stderr, "Error, Patch Request Response: '%s'\n", patchRes.String())
+		var errString string
+		if err != nil {
+			errString = fmt.Sprintf("%v", err)
+		} else {
+			errString = patchRes.String()
+		}
+		return fmt.Errorf("failed to patch creator_id for subscription:'%s', request status: %d, err: '%s'", subscriptionID, patchRes.Status(), errString)
 	}
 
 	fmt.Printf("Patched creator on subscription\n")
@@ -772,7 +1079,7 @@ func (o *transferOwnerOptions) run() error {
 
 	err = deleteOldRoleBinding(ocm, subscriptionID)
 	if err != nil {
-		fmt.Printf("can't delete old rolebinding %v \n", err)
+		fmt.Printf("Warning, can't delete old rolebinding, err: %v \n", err)
 	}
 
 	// create new rolebinding
@@ -781,13 +1088,13 @@ func (o *transferOwnerOptions) run() error {
 
 	// don't fail if the rolebinding already exists, could be rerun
 	if err != nil {
-		return fmt.Errorf("request failed '%w'", err)
+		return fmt.Errorf("account new roleBinding request failed, err: '%w'", err)
 	} else if postRes.Status() == 201 {
 		fmt.Printf("Created new role binding.\n")
 	} else if postRes.Status() == 409 {
 		fmt.Printf("can't add new rolebinding, rolebinding already exists\n")
 	} else {
-		return fmt.Errorf("request failed with status: %d, '%w'", postRes.Status(), err)
+		return fmt.Errorf("account new roleBinding request failed with status: %d, err: '%w'", postRes.Status(), err)
 	}
 
 	// If the organization id has changed, re-register the cluster with CS with the new organization id
@@ -795,16 +1102,15 @@ func (o *transferOwnerOptions) run() error {
 
 		request, err := createNewRegisterClusterRequest(ocm, externalClusterID, subscriptionID, newOwnerOrganizationId, clusterURL, displayName)
 		if err != nil {
-			return fmt.Errorf("can't create RegisterClusterRequest with CS, '%w'", err)
+			return fmt.Errorf("can't create RegisterClusterRequest with CS, err:'%w'", err)
 		}
 
 		response, err := request.Send()
 		if err != nil || (response.Status() != 200 && response.Status() != 201) {
-			return fmt.Errorf("request failed with status: %d, '%w'", response.Status(), err)
+			return fmt.Errorf("newRegisterClusterRequest failed with status: %d, err:'%w'", response.Status(), err)
 		}
 		fmt.Print("Re-registered cluster\n")
 	}
 
-	// Rollout the ocmAgent pods for non HCP clusters
 	if !o.hypershift {
 		err = rolloutPods(targetClientSet, "openshift-ocm-agent-operator", "app=ocm-agent")
@@ -815,12 +1121,12 @@ func (o *transferOwnerOptions) run() error {
 
 	err = validateTransfer(ocm, subscription.ClusterID(), newOwnerOrganizationId)
 	if err != nil {
-		return fmt.Errorf("error while validating transfer %w", err)
+		return fmt.Errorf("error while validating transfer. %w", err)
 	}
 	fmt.Print("Transfer complete\n")
 
 	fmt.Println("Notify the customer the ownership transfer is completed. Sending service log.")
-	postCmd = generateServiceLog(slParams, SL_TRANSFER_COMPLETE)
+	postCmd = generateServiceLog(slParams, SL_TRANSFER_COMPLETE, o.dryrun)
 	if err := postCmd.Run(); err != nil {
 		fmt.Println("Failed to POST service log. Please manually send a service log to notify the customer the ownership transfer is completed:")
 		fmt.Printf("osdctl servicelog post %v -t %v -p %v\n",
@@ -837,7 +1143,7 @@ func getRoleBinding(ocm *sdk.Connection, subscriptionID string) (*amv1.RoleBindi
 		Send()
 
 	if err != nil {
-		return nil, fmt.Errorf("can't send request: %v", err)
+		return nil, fmt.Errorf("RoleBindings list, can't send request: %v", err)
 	}
 
 	if response.Total() == 0 {
@@ -858,19 +1164,19 @@ func deleteOldRoleBinding(ocm *sdk.Connection, subscriptionID string) error {
 
 	oldRoleBinding, err := getRoleBinding(ocm, subscriptionID)
 	if err != nil {
-		return fmt.Errorf("can't get old owners rolebinding %w", err)
+		return fmt.Errorf("can't get old owners rolebinding: %w", err)
 	}
 
 	oldRoleBindingID, ok := oldRoleBinding.GetID()
 	if !ok {
-		return fmt.Errorf("old rolebinding has no id %w", err)
+		return fmt.Errorf("old rolebinding has no id, err: %w", err)
 	}
 
 	oldRoleBindingClient := ocm.AccountsMgmt().V1().RoleBindings().RoleBinding(oldRoleBindingID)
 
 	response, err := oldRoleBindingClient.Delete().Send()
 	if err != nil {
-		return fmt.Errorf("request failed '%w'", err)
+		return fmt.Errorf("request failed, err: '%w'", err)
 	}
 	if response.Status() == 204 {
 		fmt.Printf("Deleted old rolebinding: %v\n", oldRoleBindingID)
@@ -880,7 +1186,7 @@ func deleteOldRoleBinding(ocm *sdk.Connection, subscriptionID string) error {
 		fmt.Printf("can't find old rolebinding: %v\n", oldRoleBindingID)
 		return nil
 	}
-	fmt.Printf("request failed with status: %d\n", response.Status())
+	fmt.Printf("delete RoleBinding request failed with status: %d\n", response.Status())
 	return nil
 }
 
@@ -901,7 +1207,7 @@ func createSubscriptionCreatorPatchRequest(ocm *sdk.Connection, subscriptionID s
 
 	body, err := json.Marshal(CreatorPatch{accountID})
 	if err != nil {
-		return nil, fmt.Errorf("cannot create body for request '%w'", err)
+		return nil, fmt.Errorf("cannot create body for request, err: '%w'", err)
 	}
 
 	request.Bytes(body)
@@ -935,7 +1241,7 @@ func createNewRegisterClusterRequest(ocm *sdk.Connection, externalClusterID stri
 
 	body, err := json.Marshal(RegisterCluster{externalClusterID, subscriptionID, organizationID, consoleURL, displayName})
 	if err != nil {
-		return nil, fmt.Errorf("cannot create body for request '%w'", err)
+		return nil, fmt.Errorf("cannot create body for request, err: '%w'", err)
 	}
 
 	request.Bytes(body)
@@ -952,7 +1258,7 @@ func validateTransfer(ocm *sdk.Connection, clusterID string, newOrgID string) er
 		Send()
 
 	if err != nil || response.Status() != 200 {
-		return fmt.Errorf("request failed with status: %d, '%w'", response.Status(), err)
+		return fmt.Errorf("list clusters request failed with status: %d, err: '%w'", response.Status(), err)
 	}
 
 	if response.Total() == 0 {
diff --git a/cmd/cluster/updatepullsecret.go b/cmd/cluster/updatepullsecret.go
new file mode 100644
index 000000000..ba4208103
--- /dev/null
+++ b/cmd/cluster/updatepullsecret.go
@@ -0,0 +1,45 @@
+package cluster
+
+import (
+	"fmt"
+
+	"github.com/spf13/cobra"
+	"k8s.io/cli-runtime/pkg/genericclioptions"
+	cmdutil "k8s.io/kubectl/pkg/cmd/util"
+
+	"github.com/openshift/osdctl/internal/utils/globalflags"
+)
+
+const updatePullSecCmdExample = `
+  # Update Pull Secret's OCM access token data
+  osdctl cluster update-pull-secret --cluster-id 1kfmyclusteristhebesteverp8m --reason "Update PullSecret per pd or jira-id"
+`
+
+func newCmdUpdatePullSecret(streams genericclioptions.IOStreams, globalOpts *globalflags.GlobalOptions) *cobra.Command {
+	ops := newTransferOwnerOptions(streams, globalOpts)
+	updatePullSecretCmd := &cobra.Command{
+		Use:               "update-pull-secret",
+		Short:             "Update cluster pull-secret with current OCM AccessToken data (to be done by Region Lead)",
+		Long:              fmt.Sprintf("Update cluster pull-secret with current OCM AccessToken data (to be done by Region Lead)\n\n%s\n", transferOwnerDocs),
+		Args:              cobra.NoArgs,
+		Example:           updatePullSecCmdExample,
+		DisableAutoGenTag: true,
+		PreRun:            func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(ops.preRun()) },
+		Run: func(cmd *cobra.Command, args []string) {
+			cmdutil.CheckErr(ops.run())
+		},
+	}
+	// can we get cluster-id from some context maybe?
+	updatePullSecretCmd.Flags().StringVarP(&ops.clusterID, "cluster-id", "C", "", "The Internal Cluster ID/External Cluster ID/ Cluster Name")
+	updatePullSecretCmd.Flags().BoolVarP(&ops.dryrun, "dry-run", "d", false, "Dry-run - show all changes but do not apply them")
+	updatePullSecretCmd.Flags().StringVar(&ops.reason, "reason", "", "The reason for this command, which requires elevation, to be run (usually an OHSS or PD ticket)")
+
+	_ = updatePullSecretCmd.MarkFlagRequired("cluster-id")
+	_ = updatePullSecretCmd.MarkFlagRequired("reason")
+	// This arg is used as part of this wrapper command to instruct the transfer op to exit after
+	// updating the pull secret, and before doing the ownership transfer...
+	updatePullSecretCmd.Flags().BoolVar(&ops.doPullSecretOnly, "pull-secret-only", true, "Update cluster pull secret from current OCM AccessToken data without ownership transfer")
+	_ = updatePullSecretCmd.Flags().MarkHidden("pull-secret-only")
+
+	return updatePullSecretCmd
+}
diff --git a/cmd/servicelog/post.go b/cmd/servicelog/post.go
index 40a518eb8..65b757c49 100644
--- a/cmd/servicelog/post.go
+++ b/cmd/servicelog/post.go
@@ -116,6 +116,10 @@ func (o *PostCmdOptions) Validate() error {
 	return nil
 }
 
+func (o *PostCmdOptions) SetDryRun(dryRun bool) {
+	o.isDryRun = dryRun
+}
+
 // CheckServiceLogsLastHour returns true if there were servicelogs sent in the past hour, otherwise false
 func CheckServiceLogsLastHour(clusterId string) bool {
 	timeStampToCompare := time.Now().Add(-time.Hour)
diff --git a/docs/README.md b/docs/README.md
index 800919142..252dd8ddb 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -68,6 +68,7 @@
   - `post --cluster-id ` - Send limited support reason to a given cluster
   - `status --cluster-id ` - Shows the support status of a specified cluster
   - `transfer-owner` - Transfer cluster ownership to a new user (to be done by Region Lead)
+  - `update-pull-secret` - Update cluster pull-secret with current OCM AccessToken data (to be done by Region Lead)
   - `validate-pull-secret --cluster-id ` - Checks if the pull secret email matches the owner email
   - `validate-pull-secret-ext [CLUSTER_ID]` - Extended checks to confirm pull-secret data is synced with current OCM data
 - `cost` - Cost Management related utilities
@@ -1964,6 +1965,13 @@ osdctl cluster support status --cluster-id  [flags]
 
 Transfer cluster ownership to a new user (to be done by Region Lead)
 
+See documentation prior to executing:
+https://github.com/openshift/ops-sop/blob/master/hypershift/knowledge_base/howto/replace-pull-secret.md
+https://github.com/openshift/ops-sop/blob/master/v4/howto/transfer_cluster_ownership.md
+https://access.redhat.com/solutions/6126691
+
+
+
 ```
 osdctl cluster transfer-owner [flags]
 ```
@@ -1989,6 +1997,40 @@ osdctl cluster transfer-owner [flags]
   -S, --skip-version-check   skip checking to see if this is the most recent release
 ```
 
+### osdctl cluster update-pull-secret
+
+Update cluster pull-secret with current OCM AccessToken data (to be done by Region Lead)
+
+See documentation prior to executing:
+https://github.com/openshift/ops-sop/blob/master/hypershift/knowledge_base/howto/replace-pull-secret.md
+https://github.com/openshift/ops-sop/blob/master/v4/howto/transfer_cluster_ownership.md
+https://access.redhat.com/solutions/6126691
+
+
+
+```
+osdctl cluster update-pull-secret [flags]
+```
+
+#### Flags
+
+```
+      --as string                  Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --cluster string             The name of the kubeconfig cluster to use
+  -C, --cluster-id string          The Internal Cluster ID/External Cluster ID/ Cluster Name
+      --context string             The name of the kubeconfig context to use
+  -d, --dry-run                    Dry-run - show all changes but do not apply them
+  -h, --help                       help for update-pull-secret
+      --insecure-skip-tls-verify   If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string          Path to the kubeconfig file to use for CLI requests.
+  -o, --output string              Valid formats are ['', 'json', 'yaml', 'env']
+      --reason string              The reason for this command, which requires elevation, to be run (usually an OHSS or PD ticket)
+      --request-timeout string     The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string              The address and port of the Kubernetes API server
+      --skip-aws-proxy-check aws_proxy   Don't use the configured aws_proxy value
+  -S, --skip-version-check         skip checking to see if this is the most recent release
+```
+
 ### osdctl cluster validate-pull-secret
 
 Checks if the pull secret email matches the owner email.
diff --git a/docs/osdctl_cluster.md b/docs/osdctl_cluster.md
index 312cf7f10..49dd65e9b 100644
--- a/docs/osdctl_cluster.md
+++ b/docs/osdctl_cluster.md
@@ -45,6 +45,7 @@ Provides information for a specified cluster
 * [osdctl cluster ssh](osdctl_cluster_ssh.md) - utilities for accessing cluster via ssh
 * [osdctl cluster support](osdctl_cluster_support.md) - Cluster Support
 * [osdctl cluster transfer-owner](osdctl_cluster_transfer-owner.md) - Transfer cluster ownership to a new user (to be done by Region Lead)
+* [osdctl cluster update-pull-secret](osdctl_cluster_update-pull-secret.md) - Update cluster pull-secret with current OCM AccessToken data (to be done by Region Lead)
 * [osdctl cluster validate-pull-secret](osdctl_cluster_validate-pull-secret.md) - Checks if the pull secret email matches the owner email
 * [osdctl cluster validate-pull-secret-ext](osdctl_cluster_validate-pull-secret-ext.md) - Extended checks to confirm pull-secret data is synced with current OCM data
 
diff --git a/docs/osdctl_cluster_transfer-owner.md b/docs/osdctl_cluster_transfer-owner.md
index 377d551a8..9bcbfdbd8 100644
--- a/docs/osdctl_cluster_transfer-owner.md
+++ b/docs/osdctl_cluster_transfer-owner.md
@@ -2,10 +2,30 @@
 
 Transfer cluster ownership to a new user (to be done by Region Lead)
 
+### Synopsis
+
+Transfer cluster ownership to a new user (to be done by Region Lead)
+
+See documentation prior to executing:
+https://github.com/openshift/ops-sop/blob/master/hypershift/knowledge_base/howto/replace-pull-secret.md
+https://github.com/openshift/ops-sop/blob/master/v4/howto/transfer_cluster_ownership.md
+https://access.redhat.com/solutions/6126691
+
+
+
 ```
 osdctl cluster transfer-owner [flags]
 ```
 
+### Examples
+
+```
+
+  # Transfer ownership
+  osdctl cluster transfer-owner --new-owner "$NEW_ACCOUNT" --old-owner "$OLD_ACCOUNT" --cluster-id 1kfmyclusteristhebesteverp8m --reason "transfer ownership per jira-id"
+
+```
+
 ### Options
 
 ```
diff --git a/docs/osdctl_cluster_update-pull-secret.md b/docs/osdctl_cluster_update-pull-secret.md
new file mode 100644
index 000000000..281072fb1
--- /dev/null
+++ b/docs/osdctl_cluster_update-pull-secret.md
@@ -0,0 +1,56 @@
+## osdctl cluster update-pull-secret
+
+Update cluster pull-secret with current OCM AccessToken data (to be done by Region Lead)
+
+### Synopsis
+
+Update cluster pull-secret with current OCM AccessToken data (to be done by Region Lead)
+
+See documentation prior to executing:
+https://github.com/openshift/ops-sop/blob/master/hypershift/knowledge_base/howto/replace-pull-secret.md
+https://github.com/openshift/ops-sop/blob/master/v4/howto/transfer_cluster_ownership.md
+https://access.redhat.com/solutions/6126691
+
+
+
+```
+osdctl cluster update-pull-secret [flags]
+```
+
+### Examples
+
+```
+
+  # Update Pull Secret's OCM access token data
+  osdctl cluster update-pull-secret --cluster-id 1kfmyclusteristhebesteverp8m --reason "Update PullSecret per pd or jira-id"
+
+```
+
+### Options
+
+```
+  -C, --cluster-id string   The Internal Cluster ID/External Cluster ID/ Cluster Name
+  -d, --dry-run             Dry-run - show all changes but do not apply them
+  -h, --help                help for update-pull-secret
+      --reason string       The reason for this command, which requires elevation, to be run (usually an OHSS or PD ticket)
+```
+
+### Options inherited from parent commands
+
+```
+      --as string                  Username to impersonate for the operation. User could be a regular user or a service account in a namespace.
+      --cluster string             The name of the kubeconfig cluster to use
+      --context string             The name of the kubeconfig context to use
+      --insecure-skip-tls-verify   If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+      --kubeconfig string          Path to the kubeconfig file to use for CLI requests.
+  -o, --output string              Valid formats are ['', 'json', 'yaml', 'env']
+      --request-timeout string     The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
+  -s, --server string              The address and port of the Kubernetes API server
+      --skip-aws-proxy-check aws_proxy   Don't use the configured aws_proxy value
+  -S, --skip-version-check         skip checking to see if this is the most recent release
+```
+
+### SEE ALSO
+
+* [osdctl cluster](osdctl_cluster.md) - Provides information for a specified cluster
+