package app

import (
	"context"
	"flag"
	"fmt"
	"os"

	"github.com/google/uuid"
	"github.com/spf13/cobra"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/klog/v2"

	"github.com/karmada-io/karmada/cmd/scheduler/app/options"
	karmadaclientset "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
	"github.com/karmada-io/karmada/pkg/scheduler"
)

// NewSchedulerCommand creates a *cobra.Command object with default parameters.
func NewSchedulerCommand(stopChan <-chan struct{}) *cobra.Command {
	opts := options.NewOptions()

	cmd := &cobra.Command{
		Use:  "scheduler",
		Long: `The karmada scheduler binds resources to the clusters it manages.`,
		Run: func(cmd *cobra.Command, args []string) {
			if err := run(opts, stopChan); err != nil {
				fmt.Fprintf(os.Stderr, "%v\n", err)
				os.Exit(1)
			}
		},
	}

	opts.AddFlags(cmd.Flags())
	cmd.Flags().AddGoFlagSet(flag.CommandLine)
	return cmd
}

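// run sets up the client connections and starts the scheduler, optionally
// guarding it with leader election so that only one replica is active at a time.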
func run(opts *options.Options, stopChan <-chan struct{}) error {
	restConfig, err := clientcmd.BuildConfigFromFlags(opts.Master, opts.KubeConfig)
	if err != nil {
		return fmt.Errorf("error building kubeconfig: %w", err)
	}

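	// Build the dynamic, Karmada, and Kubernetes clientsets from the same rest config.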
	dynamicClientSet := dynamic.NewForConfigOrDie(restConfig)
	karmadaClient := karmadaclientset.NewForConfigOrDie(restConfig)
	kubeClientSet := kubernetes.NewForConfigOrDie(restConfig)

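	// Bridge the external stop channel into a cancellable context that is shared
	// by the scheduler and the leader election loop.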
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		<-stopChan
		cancel()
	}()

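	// When leader election is disabled, run the scheduler directly in this process.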
	sched := scheduler.NewScheduler(dynamicClientSet, karmadaClient, kubeClientSet)
	if !opts.LeaderElection.LeaderElect {
		sched.Run(ctx)
		return fmt.Errorf("scheduler exited")
	}

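	// Use a dedicated client (with its own user agent) for leader election and
	// derive a unique identity for this scheduler instance.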
	leaderElectionClient, err := kubernetes.NewForConfig(rest.AddUserAgent(restConfig, "leader-election"))
	if err != nil {
		return err
	}
	hostname, err := os.Hostname()
	if err != nil {
		return fmt.Errorf("unable to get hostname: %v", err)
	}
	// add a uniquifier so that two processes on the same host don't accidentally both become active
	id := hostname + "_" + uuid.New().String()

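	// Create the resource lock that leader election candidates compete for.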
	rl, err := resourcelock.New(opts.LeaderElection.ResourceLock,
		opts.LeaderElection.ResourceNamespace,
		"karmada-scheduler",
		leaderElectionClient.CoreV1(),
		leaderElectionClient.CoordinationV1(),
		resourcelock.ResourceLockConfig{
			Identity: id,
		})
	if err != nil {
		return fmt.Errorf("couldn't create resource lock: %v", err)
	}

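	// Block in the leader election loop: the scheduler only starts once this
	// instance acquires the lock, and the process exits if leadership is lost.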
	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:          rl,
		LeaseDuration: opts.LeaderElection.LeaseDuration.Duration,
		RenewDeadline: opts.LeaderElection.RenewDeadline.Duration,
		RetryPeriod:   opts.LeaderElection.RetryPeriod.Duration,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: sched.Run,
			OnStoppedLeading: func() {
				klog.Fatalf("leaderelection lost")
			},
		},
	})

	return nil
}
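
A minimal sketch of how this command might be wired into an entry point, assuming a hypothetical main package; the real binary may translate OS signals into the stop channel differently (for example via a shared signal-handler helper):

package main

import (
	"os"
	"os/signal"
	"syscall"

	"k8s.io/klog/v2"

	"github.com/karmada-io/karmada/cmd/scheduler/app"
)

func main() {
	// Translate SIGINT/SIGTERM into the stop channel expected by NewSchedulerCommand.
	stopChan := make(chan struct{})
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		<-signals
		close(stopChan)
	}()

	if err := app.NewSchedulerCommand(stopChan).Execute(); err != nil {
		klog.Fatal(err)
	}
}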