diff --git a/.github/workflows/tag.yml b/.github/workflows/tag.yml
index d0bcd591..650e7bd0 100644
--- a/.github/workflows/tag.yml
+++ b/.github/workflows/tag.yml
@@ -9,7 +9,7 @@ jobs:
   bump-tag:
     runs-on: ubuntu-latest
     permissions:
-      contents: read
+      contents: write
       pull-requests: write
     outputs:
       new_version: ${{ steps.bump-semver.outputs.new_version }}
diff --git a/api/v1beta1/spannerautoscaler_types.go b/api/v1beta1/spannerautoscaler_types.go
index 9f039c6a..026df119 100644
--- a/api/v1beta1/spannerautoscaler_types.go
+++ b/api/v1beta1/spannerautoscaler_types.go
@@ -108,6 +108,14 @@ type ScaleConfig struct {
 	// The cool down period between two consecutive scaledown operations. If this option is omitted, the value of the `--scale-down-interval` command line option is taken as the default value.
 	ScaledownInterval *metav1.Duration `json:"scaledownInterval,omitempty"`
 
+	// The maximum number of processing units which can be added in one scale-up operation. It can be a multiple of 100 for values < 1000, or a multiple of 1000 otherwise.
+	// +kubebuilder:default=0
+	ScaleupStepSize int `json:"scaleupStepSize,omitempty"`
+
+	// How often autoscaler is reevaluated for scale up.
+	// The warm up period between two consecutive scaleup operations. If this option is omitted, the value of the `--scale-up-interval` command line option is taken as the default value.
+	ScaleupInterval *metav1.Duration `json:"scaleupInterval,omitempty"`
+
 	// The CPU utilization which the autoscaling will try to achieve. Ref: [Spanner CPU utilization](https://cloud.google.com/spanner/docs/cpu-utilization#task-priority)
 	TargetCPUUtilization TargetCPUUtilization `json:"targetCPUUtilization"`
 }
diff --git a/api/v1beta1/spannerautoscaler_webhook.go b/api/v1beta1/spannerautoscaler_webhook.go
index 5d15f726..22fe7e45 100644
--- a/api/v1beta1/spannerautoscaler_webhook.go
+++ b/api/v1beta1/spannerautoscaler_webhook.go
@@ -200,6 +200,18 @@ func (r *SpannerAutoscaler) validateScaleConfig() *field.Error {
 			"must be a multiple of 100 for values which are less than 1000")
 	}
 
+	if sc.ScaleupStepSize > 1000 && sc.ScaleupStepSize%1000 != 0 {
+		return field.Invalid(
+			field.NewPath("spec").Child("scaleConfig").Child("scaleupStepSize"),
+			sc.ScaleupStepSize,
+			"must be a multiple of 1000 for values which are greater than 1000")
+	} else if sc.ScaleupStepSize < 1000 && sc.ScaleupStepSize%100 != 0 {
+		return field.Invalid(
+			field.NewPath("spec").Child("scaleConfig").Child("scaleupStepSize"),
+			sc.ScaleupStepSize,
+			"must be a multiple of 100 for values which are less than 1000")
+	}
+
 	return nil
 }
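Note on the validation above: the webhook applies the same multiple-of-100/multiple-of-1000 rule to `scaleupStepSize` that it already applies to `scaledownStepSize`, and the `+kubebuilder:default=0` means an unset field passes both checks (0 disables the step limit entirely). A minimal standalone sketch of the rule, with a hypothetical `validStepSize` helper that is not part of this change:

```go
package main

import "fmt"

// validStepSize mirrors the webhook checks above: values above 1000 must be
// multiples of 1000, values at or below 1000 must be multiples of 100, and 0
// (the default) is accepted, which disables the step limit.
func validStepSize(n int) bool {
	if n > 1000 {
		return n%1000 == 0
	}
	return n%100 == 0
}

func main() {
	for _, n := range []int{0, 100, 700, 1000, 2000, 250, 1500} {
		// 0, 100, 700, 1000 and 2000 are valid; 250 and 1500 are rejected.
		fmt.Printf("scaleupStepSize=%d valid=%v\n", n, validStepSize(n))
	}
}
```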
diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go
index f3b3c2b6..62ff3029 100644
--- a/api/v1beta1/zz_generated.deepcopy.go
+++ b/api/v1beta1/zz_generated.deepcopy.go
@@ -112,6 +112,11 @@ func (in *ScaleConfig) DeepCopyInto(out *ScaleConfig) {
 		*out = new(v1.Duration)
 		**out = **in
 	}
+	if in.ScaleupInterval != nil {
+		in, out := &in.ScaleupInterval, &out.ScaleupInterval
+		*out = new(v1.Duration)
+		**out = **in
+	}
 	out.TargetCPUUtilization = in.TargetCPUUtilization
 }
diff --git a/cmd/main.go b/cmd/main.go
index 3dfa183c..c2977a89 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -60,6 +60,7 @@ var (
 	enableLeaderElection = flag.Bool("leader-elect", false, "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
 	leaderElectionID     = flag.String("leader-elect-id", "", "Lease name for leader election.")
 	scaleDownInterval    = flag.Duration("scale-down-interval", 55*time.Minute, "The scale down interval.")
+	scaleUpInterval      = flag.Duration("scale-up-interval", 60*time.Second, "The scale up interval.")
 	configFile           = flag.String("config", "", "The controller will load its initial configuration from this file. "+
 		"Omit this flag to use the default configuration values. Command-line flags override configuration from this file.")
 )
@@ -87,6 +88,7 @@ func main() {
 		"metricsAddr", metricsAddr,
 		"probeAddr", probeAddr,
 		"scaleDownInterval", scaleDownInterval,
+		"scaleUpInterval", scaleUpInterval,
 	)
 
 	cfg, err := config.GetConfig()
@@ -139,6 +141,7 @@ func main() {
 		log,
 		controller.WithLog(log),
 		controller.WithScaleDownInterval(*scaleDownInterval),
+		controller.WithScaleUpInterval(*scaleUpInterval),
 	)
 	if err := sar.SetupWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "SpannerAutoscaler")
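The generated deep-copy hunk matters because `ScaleupInterval` is a pointer: a plain struct assignment would alias the shared `metav1.Duration`, so mutating one copy would silently change the other, while the value-typed `ScaleupStepSize` is safe to copy by assignment. A small self-contained illustration with stand-in types (not code from this change):

```go
package main

import (
	"fmt"
	"time"
)

type duration struct{ D time.Duration } // stand-in for metav1.Duration

type scaleConfig struct {
	ScaleupStepSize int       // value field: copied by assignment
	ScaleupInterval *duration // pointer field: aliased by assignment
}

func main() {
	orig := scaleConfig{ScaleupStepSize: 2000, ScaleupInterval: &duration{D: time.Minute}}

	shallow := orig // shares orig's pointer
	deep := orig
	deep.ScaleupInterval = &duration{D: orig.ScaleupInterval.D} // clone the pointee, as DeepCopyInto does

	orig.ScaleupInterval.D = time.Hour
	fmt.Println(shallow.ScaleupInterval.D) // 1h0m0s: mutated through the alias
	fmt.Println(deep.ScaleupInterval.D)    // 1m0s: unaffected
}
```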
diff --git a/config/crd/bases/spanner.mercari.com_spannerautoscalers.yaml b/config/crd/bases/spanner.mercari.com_spannerautoscalers.yaml
index 9229ed2d..f4d71479 100644
--- a/config/crd/bases/spanner.mercari.com_spannerautoscalers.yaml
+++ b/config/crd/bases/spanner.mercari.com_spannerautoscalers.yaml
@@ -361,6 +361,18 @@ spec:
                     be deleted in one scale-down operation. It can be a multiple
                     of 100 for values < 1000, or a multiple of 1000 otherwise.
                   type: integer
+                scaleupInterval:
+                  description: How often autoscaler is reevaluated for scale up.
+                    The warm up period between two consecutive scaleup operations.
+                    If this option is omitted, the value of the `--scale-up-interval`
+                    command line option is taken as the default value.
+                  type: string
+                scaleupStepSize:
+                  default: 0
+                  description: The maximum number of processing units which can
+                    be added in one scale-up operation. It can be a multiple of
+                    100 for values < 1000, or a multiple of 1000 otherwise.
+                  type: integer
                 targetCPUUtilization:
                   description: 'The CPU utilization which the autoscaling will try
                     to achieve. Ref: [Spanner CPU utilization](https://cloud.google.com/spanner/docs/cpu-utilization#task-priority)'
diff --git a/docs/crd-reference.md b/docs/crd-reference.md
index b51c7ecc..e4c1eba1 100644
--- a/docs/crd-reference.md
+++ b/docs/crd-reference.md
@@ -92,7 +92,9 @@ _Appears in:_
 | `nodes` _[ScaleConfigNodes](#scaleconfignodes)_ | If `nodes` are provided at the time of resource creation, then they are automatically converted to `processing-units`. So it is recommended to use only the processing units. Ref: [Spanner Compute Capacity](https://cloud.google.com/spanner/docs/compute-capacity#compute_capacity) |
 | `processingUnits` _[ScaleConfigPUs](#scaleconfigpus)_ | ProcessingUnits for scaling of the Spanner instance. Ref: [Spanner Compute Capacity](https://cloud.google.com/spanner/docs/compute-capacity#compute_capacity) |
 | `scaledownStepSize` _integer_ | The maximum number of processing units which can be deleted in one scale-down operation. It can be a multiple of 100 for values < 1000, or a multiple of 1000 otherwise. |
-| `scaledownInterval` _[Duration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#duration-v1-meta)_ | How often autoscaler is reevaluated for scale down. The cool down period between two consecutive scaledown operations. If this option is omitted, the value of the `--scale-down-interval` command line option is taken as the default value. |
+| `scaledownInterval` _Duration_ | How often autoscaler is reevaluated for scale down. The cool down period between two consecutive scaledown operations. If this option is omitted, the value of the `--scale-down-interval` command line option is taken as the default value. A duration string is a possibly signed sequence of decimal numbers, each with a unit suffix, such as "300m", "1.5h" or "2h45m". |
+| `scaleupStepSize` _integer_ | The maximum number of processing units which can be added in one scale-up operation. It can be a multiple of 100 for values < 1000, or a multiple of 1000 otherwise. |
+| `scaleupInterval` _Duration_ | How often autoscaler is reevaluated for scale up. The warm up period between two consecutive scaleup operations. If this option is omitted, the value of the `--scale-up-interval` command line option is taken as the default value. A duration string is a possibly signed sequence of decimal numbers, each with a unit suffix, such as "300m", "1.5h" or "2h45m". |
 | `targetCPUUtilization` _[TargetCPUUtilization](#targetcpuutilization)_ | The CPU utilization which the autoscaling will try to achieve. Ref: [Spanner CPU utilization](https://cloud.google.com/spanner/docs/cpu-utilization#task-priority) |
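Taken together, the two new fields sit alongside the existing scale-down options. A hedged sketch of a `ScaleConfig` using them (import paths assumed from the repository layout; the surrounding values are illustrative, not recommendations):

```go
package main

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	spannerv1beta1 "github.com/mercari/spanner-autoscaler/api/v1beta1"
)

// Illustrative only: cap each scale-up at 2000 processing units and wait at
// least 10 minutes between consecutive scale-up operations.
var exampleScaleConfig = spannerv1beta1.ScaleConfig{
	ProcessingUnits: spannerv1beta1.ScaleConfigPUs{
		Min: 1000,
		Max: 10000,
	},
	ScaleupStepSize: 2000,
	ScaleupInterval: &metav1.Duration{Duration: 10 * time.Minute},
	TargetCPUUtilization: spannerv1beta1.TargetCPUUtilization{
		HighPriority: 60,
	},
}
```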
diff --git a/internal/controller/spannerautoscaler_controller.go b/internal/controller/spannerautoscaler_controller.go
index ec9982e5..f93a4d61 100644
--- a/internal/controller/spannerautoscaler_controller.go
+++ b/internal/controller/spannerautoscaler_controller.go
@@ -65,6 +65,7 @@ type SpannerAutoscalerReconciler struct {
 	schedulers map[types.NamespacedName]schedulerpkg.Scheduler
 
 	scaleDownInterval time.Duration
+	scaleUpInterval   time.Duration
 
 	clock utilclock.Clock
 	log   logr.Logger
@@ -147,6 +148,19 @@ func (o withScaleDownInterval) applySpannerAutoscalerReconciler(r *SpannerAutoscalerReconciler) {
 	r.scaleDownInterval = o.scaleDownInterval
 }
 
+// WithScaleUpInterval adds the scale-up-interval option for the autoscaler reconciler.
+func WithScaleUpInterval(scaleUpInterval time.Duration) Option {
+	return withScaleUpInterval{scaleUpInterval: scaleUpInterval}
+}
+
+type withScaleUpInterval struct {
+	scaleUpInterval time.Duration
+}
+
+func (o withScaleUpInterval) applySpannerAutoscalerReconciler(r *SpannerAutoscalerReconciler) {
+	r.scaleUpInterval = o.scaleUpInterval
+}
+
 // NewSpannerAutoscalerReconciler returns a new SpannerAutoscalerReconciler.
 func NewSpannerAutoscalerReconciler(
 	ctrlClient ctrlclient.Client,
@@ -165,6 +179,7 @@ func NewSpannerAutoscalerReconciler(
 		schedulers:        make(map[types.NamespacedName]schedulerpkg.Scheduler),
 		crons:             make(map[types.NamespacedName]*cronpkg.Cron),
 		scaleDownInterval: 55 * time.Minute,
+		scaleUpInterval:   60 * time.Second,
 		clock:             utilclock.RealClock{},
 		log:               logger,
 	}
@@ -485,7 +500,7 @@ func (r *SpannerAutoscalerReconciler) needUpdateProcessingUnits(log logr.Logger,
 		log.Info("no need to scale", "currentPU", currentProcessingUnits, "currentCPU", sa.Status.CurrentHighPriorityCPUUtilization)
 		return false
 
-	case desiredProcessingUnits > currentProcessingUnits && r.clock.Now().Before(sa.Status.LastScaleTime.Time.Add(10*time.Second)):
+	case currentProcessingUnits < desiredProcessingUnits && r.clock.Now().Before(sa.Status.LastScaleTime.Time.Add(getOrConvertTimeDuration(sa.Spec.ScaleConfig.ScaleupInterval, r.scaleUpInterval))):
 		log.Info("too short to scale up since last scale-up event",
 			"timeGap", now.Sub(sa.Status.LastScaleTime.Time).String(),
 			"now", now.String(),
@@ -532,6 +547,7 @@ func calcDesiredProcessingUnits(sa spannerv1beta1.SpannerAutoscaler) int {
 	}
 
 	sdStepSize := sa.Spec.ScaleConfig.ScaledownStepSize
+	suStepSize := sa.Spec.ScaleConfig.ScaleupStepSize
 
 	// round up the scaledownStepSize to avoid intermediate values
 	// for example: 8000 -> 7000 instead of 8000 -> 7400
@@ -539,11 +555,27 @@ func calcDesiredProcessingUnits(sa spannerv1beta1.SpannerAutoscaler) int {
 		sdStepSize = 1000
 	}
 
+	// round up the scaleupStepSize to avoid intermediate values
+	// for example: 7000 -> 8000 instead of 7000 -> 7600
+	// To keep compatibility, this is skipped when scaleupStepSize is 0
+	if suStepSize != 0 && suStepSize < 1000 && sa.Status.CurrentProcessingUnits+suStepSize > 1000 {
+		suStepSize = 1000
+	}
+
 	// in case of scaling down, check that we don't scale down beyond the ScaledownStepSize
 	if scaledDownPU := (sa.Status.CurrentProcessingUnits - sdStepSize); desiredPU < scaledDownPU {
 		desiredPU = scaledDownPU
 	}
 
+	// in case of scaling up, check that we don't scale up beyond the ScaleupStepSize
+	if scaledUpPU := (sa.Status.CurrentProcessingUnits + suStepSize); suStepSize != 0 && scaledUpPU < desiredPU {
+		desiredPU = scaledUpPU
+
+		if 1000 < desiredPU && desiredPU%1000 != 0 {
+			desiredPU = ((desiredPU / 1000) + 1) * 1000
+		}
+	}
+
 	// keep the scaling between the specified min/max range
 	minPU := sa.Spec.ScaleConfig.ProcessingUnits.Min
 	maxPU := sa.Spec.ScaleConfig.ProcessingUnits.Max
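The clamp interacts with Spanner's sizing rule that capacities above 1000 PUs come in whole thousands: a capped target that lands on an invalid value (for example 1800) is rounded up rather than handed to the API as-is. A standalone sketch of just this arithmetic, mirroring the hunk above (the min/max clamp that follows it in the real function is omitted here):

```go
package main

import "fmt"

// clampScaleUp applies the ScaleupStepSize limit: a step of 0 keeps the old
// unbounded behaviour; a sub-1000 step that would cross the 1000-PU boundary
// is widened to 1000; and a capped value above 1000 that is not a whole
// thousand is rounded up to the next multiple of 1000.
func clampScaleUp(current, desired, step int) int {
	if step == 0 {
		return desired
	}
	if step < 1000 && current+step > 1000 {
		step = 1000
	}
	if capped := current + step; capped < desired {
		desired = capped
		if desired > 1000 && desired%1000 != 0 {
			desired = (desired/1000 + 1) * 1000
		}
	}
	return desired
}

func main() {
	fmt.Println(clampScaleUp(800, 8000, 700))    // 2000: step widened to 1000, 1800 rounded up
	fmt.Println(clampScaleUp(1000, 10000, 3000)) // 4000: capped at 1000+3000
	fmt.Println(clampScaleUp(300, 3000, 700))    // 1000: capped at 300+700
	fmt.Println(clampScaleUp(2000, 2000, 700))   // 2000: no scale-up needed, cap not applied
}
```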
diff --git a/internal/controller/spannerautoscaler_controller_test.go b/internal/controller/spannerautoscaler_controller_test.go
index 74d3e366..055cb78c 100644
--- a/internal/controller/spannerautoscaler_controller_test.go
+++ b/internal/controller/spannerautoscaler_controller_test.go
@@ -128,11 +128,25 @@ var _ = Describe("Check Update Nodes", func() {
 		By("Creating a test reconciler")
 		testReconciler = &SpannerAutoscalerReconciler{
 			scaleDownInterval: time.Hour,
+			scaleUpInterval:   time.Hour,
 			clock:             testingclock.NewFakeClock(fakeTime),
 			log:               logr.Discard(),
 		}
 	})
 
+	It("needs to scale down nodes because enough time has elapsed since the last update", func() {
+		sa := &spannerv1beta1.SpannerAutoscaler{
+			Status: spannerv1beta1.SpannerAutoscalerStatus{
+				LastScaleTime:          metav1.Time{Time: fakeTime.Add(-2 * time.Hour)},
+				CurrentProcessingUnits: 2000,
+				DesiredProcessingUnits: 1000,
+				InstanceState:          spannerv1beta1.InstanceStateReady,
+			},
+		}
+		got := testReconciler.needUpdateProcessingUnits(testReconciler.log, sa, sa.Status.DesiredProcessingUnits, fakeTime)
+		Expect(got).To(BeTrue())
+	})
+
 	It("does not need to scale down nodes because enough time has not elapsed since last update", func() {
 		sa := &spannerv1beta1.SpannerAutoscaler{
 			Status: spannerv1beta1.SpannerAutoscalerStatus{
@@ -146,7 +160,7 @@ var _ = Describe("Check Update Nodes", func() {
 		Expect(got).To(BeFalse())
 	})
 
-	It("needs to scale up nodes because cooldown interval is only applied to scale down operations", func() {
+	It("needs to scale up nodes because enough time has elapsed since the last update", func() {
 		sa := &spannerv1beta1.SpannerAutoscaler{
 			Spec: spannerv1beta1.SpannerAutoscalerSpec{
 				ScaleConfig: spannerv1beta1.ScaleConfig{},
 			},
 			Status: spannerv1beta1.SpannerAutoscalerStatus{
-				LastScaleTime:          metav1.Time{Time: fakeTime.Add(-time.Minute)},
+				LastScaleTime:          metav1.Time{Time: fakeTime.Add(-2 * time.Hour)},
 				CurrentProcessingUnits: 1000,
 				DesiredProcessingUnits: 2000,
 				InstanceState:          spannerv1beta1.InstanceStateReady,
 			},
 		}
@@ -163,10 +177,23 @@ var _ = Describe("Check Update Nodes", func() {
 		got := testReconciler.needUpdateProcessingUnits(testReconciler.log, sa, sa.Status.DesiredProcessingUnits, fakeTime)
 		Expect(got).To(BeTrue())
 	})
+
+	It("does not need to scale up nodes because enough time has not elapsed since the last update", func() {
+		sa := &spannerv1beta1.SpannerAutoscaler{
+			Status: spannerv1beta1.SpannerAutoscalerStatus{
+				LastScaleTime:          metav1.Time{Time: fakeTime.Add(-time.Minute)},
+				CurrentProcessingUnits: 1000,
+				DesiredProcessingUnits: 2000,
+				InstanceState:          spannerv1beta1.InstanceStateReady,
+			},
+		}
+		got := testReconciler.needUpdateProcessingUnits(testReconciler.log, sa, sa.Status.DesiredProcessingUnits, fakeTime)
+		Expect(got).To(BeFalse())
+	})
 })
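These interval tests are deterministic because the reconciler takes its notion of "now" from an injected clock rather than calling `time.Now()` directly. The gate under test, isolated (using the same `k8s.io/utils/clock/testing` fake that the test file imports):

```go
package main

import (
	"fmt"
	"time"

	testingclock "k8s.io/utils/clock/testing"
)

func main() {
	fakeTime := time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC)
	clk := testingclock.NewFakeClock(fakeTime)

	scaleUpInterval := time.Hour

	// Mirrors the controller's condition: scale-up is blocked while
	// now < lastScaleTime + scaleUpInterval.
	for _, elapsed := range []time.Duration{time.Minute, 2 * time.Hour} {
		lastScale := fakeTime.Add(-elapsed)
		blocked := clk.Now().Before(lastScale.Add(scaleUpInterval))
		fmt.Printf("elapsed=%v blocked=%v\n", elapsed, blocked) // 1m blocked, 2h allowed
	}
}
```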
 
 var _ = DescribeTable("Calculate Desired Processing Units",
-	func(currentCPU, currentProcessingUnits, targetCPU, minProcessingUnits, maxProcessingUnits, scaledownStepSize, want int) {
+	func(currentCPU, currentProcessingUnits, targetCPU, minProcessingUnits, maxProcessingUnits, scaledownStepSize, scaleupStepSize, want int) {
 		baseObj := spannerv1beta1.SpannerAutoscaler{}
 		baseObj.Status.CurrentProcessingUnits = currentProcessingUnits
 		baseObj.Status.CurrentHighPriorityCPUUtilization = currentCPU
@@ -177,6 +204,7 @@ var _ = DescribeTable("Calculate Desired Processing Units",
 				Max: maxProcessingUnits,
 			},
 			ScaledownStepSize: scaledownStepSize,
+			ScaleupStepSize:   scaleupStepSize,
 			TargetCPUUtilization: spannerv1beta1.TargetCPUUtilization{
 				HighPriority: targetCPU,
 			},
@@ -184,34 +212,45 @@ var _ = DescribeTable("Calculate Desired Processing Units",
 		got := calcDesiredProcessingUnits(baseObj)
 		Expect(got).To(Equal(want))
 	},
-	Entry("should not scale", 25, 200, 30, 100, 1000, 2000, 200),
-	Entry("should scale up 1", 50, 300, 30, 100, 1000, 2000, 600),
-	Entry("should scale up 2", 50, 3000, 30, 1000, 10000, 2000, 6000),
-	Entry("should scale up 3", 50, 900, 40, 100, 5000, 2000, 2000),
-	Entry("should scale down 1", 30, 500, 50, 100, 10000, 2000, 400),
-	Entry("should scale down 2", 30, 5000, 50, 1000, 10000, 2000, 4000),
-	Entry("should scale down 3", 25, 1000, 65, 300, 10000, 800, 400),
-	Entry("should scale down 4", 25, 800, 65, 300, 10000, 800, 400),
-	Entry("should scale down 5", 25, 700, 65, 300, 10000, 800, 300),
-	Entry("should scale up to max PUs 1", 50, 300, 30, 100, 400, 2000, 400),
-	Entry("should scale up to max PUs 2", 50, 3000, 30, 1000, 4000, 2000, 4000),
-	Entry("should scale down to min PUs 1", 0, 500, 50, 100, 1000, 2000, 100),
-	Entry("should scale down to min PUs 2", 0, 5000, 50, 1000, 10000, 5000, 1000),
-	Entry("should scale down to min PUs 3", 0, 5000, 50, 100, 10000, 5000, 100),
-	Entry("should scale down with ScaledownStepSize 1", 30, 10000, 50, 5000, 10000, 2000, 8000),
-	Entry("should scale down with ScaledownStepSize 2", 30, 10000, 50, 5000, 12000, 200, 9000),
-	Entry("should scale down with ScaledownStepSize 3", 30, 10000, 50, 5000, 12000, 100, 9000),
-	Entry("should scale down with ScaledownStepSize 4", 30, 10000, 50, 5000, 12000, 900, 9000),
-	Entry("should scale down with ScaledownStepSize 5", 25, 2000, 65, 300, 10000, 500, 1000),
-	Entry("should scale down with ScaledownStepSize 6", 25, 2000, 65, 300, 10000, 800, 1000),
-	Entry("should scale down with ScaledownStepSize 7", 25, 1000, 65, 300, 10000, 500, 500),
-	Entry("should scale down with ScaledownStepSize 8", 20, 800, 75, 300, 10000, 200, 600),
-	Entry("should scale down with ScaledownStepSize 9", 25, 2000, 50, 300, 10000, 500, 2000),
-	Entry("should scale down with ScaledownStepSize 10", 25, 2000, 50, 300, 10000, 1000, 2000),
-	Entry("should scale down with ScaledownStepSize 11", 25, 2000, 70, 300, 10000, 400, 1000),
-	Entry("should scale down with ScaledownStepSize 12", 25, 1000, 70, 300, 10000, 400, 600),
-	Entry("should scale down with ScaledownStepSize 13", 20, 2000, 50, 300, 10000, 200, 1000),
-	Entry("should scale down with ScaledownStepSize 14", 20, 2000, 75, 300, 10000, 200, 1000),
+	Entry("should not scale", 25, 200, 30, 100, 1000, 2000, 0, 200),
+	Entry("should scale up 1", 50, 300, 30, 100, 1000, 2000, 0, 600),
+	Entry("should scale up 2", 50, 1000, 30, 1000, 10000, 2000, 0, 2000),
+	Entry("should scale up 3", 50, 900, 40, 100, 5000, 2000, 0, 2000),
+	Entry("should scale down 1", 30, 500, 50, 100, 10000, 2000, 0, 400),
+	Entry("should scale down 2", 30, 5000, 50, 1000, 10000, 2000, 0, 4000),
+	Entry("should scale down 3", 25, 1000, 65, 300, 10000, 800, 0, 400),
+	Entry("should scale down 4", 25, 800, 65, 300, 10000, 800, 0, 400),
+	Entry("should scale down 5", 25, 700, 65, 300, 10000, 800, 0, 300),
+	Entry("should scale up to max PUs 1", 50, 300, 30, 100, 400, 2000, 0, 400),
+	Entry("should scale up to max PUs 2", 50, 3000, 30, 1000, 4000, 2000, 0, 4000),
+	Entry("should scale down to min PUs 1", 0, 500, 50, 100, 1000, 2000, 0, 100),
+	Entry("should scale down to min PUs 2", 0, 5000, 50, 1000, 10000, 5000, 0, 1000),
+	Entry("should scale down to min PUs 3", 0, 5000, 50, 100, 10000, 5000, 0, 100),
+	Entry("should scale down with ScaledownStepSize 1", 30, 10000, 50, 5000, 10000, 2000, 0, 8000),
+	Entry("should scale down with ScaledownStepSize 2", 30, 10000, 50, 5000, 12000, 200, 0, 9000),
+	Entry("should scale down with ScaledownStepSize 3", 30, 10000, 50, 5000, 12000, 100, 0, 9000),
+	Entry("should scale down with ScaledownStepSize 4", 30, 10000, 50, 5000, 12000, 900, 0, 9000),
+	Entry("should scale down with ScaledownStepSize 5", 25, 2000, 65, 300, 10000, 500, 0, 1000),
+	Entry("should scale down with ScaledownStepSize 6", 25, 2000, 65, 300, 10000, 800, 0, 1000),
+	Entry("should scale down with ScaledownStepSize 7", 25, 1000, 65, 300, 10000, 500, 0, 500),
+	Entry("should scale down with ScaledownStepSize 8", 20, 800, 75, 300, 10000, 200, 0, 600),
+	Entry("should scale down with ScaledownStepSize 9", 25, 2000, 50, 300, 10000, 500, 0, 2000),
+	Entry("should scale down with ScaledownStepSize 10", 25, 2000, 50, 300, 10000, 1000, 0, 2000),
+	Entry("should scale down with ScaledownStepSize 11", 25, 2000, 70, 300, 10000, 400, 0, 1000),
+	Entry("should scale down with ScaledownStepSize 12", 25, 1000, 70, 300, 10000, 400, 0, 600),
+	Entry("should scale down with ScaledownStepSize 13", 20, 2000, 50, 300, 10000, 200, 0, 1000),
+	Entry("should scale down with ScaledownStepSize 14", 20, 2000, 75, 300, 10000, 200, 0, 1000),
+	Entry("should scale up with ScaleupStepSize when ScaleupStepSize is equal to zero 1", 80, 100, 10, 100, 10000, 2000, 0, 900),
+	Entry("should scale up with ScaleupStepSize when ScaleupStepSize is equal to zero 2", 100, 100, 10, 100, 10000, 2000, 0, 2000),
+	Entry("should scale up with ScaleupStepSize when currentPU is lower than 1000 1", 80, 100, 10, 100, 10000, 2000, 700, 800),
+	Entry("should scale up with ScaleupStepSize when currentPU is lower than 1000 2", 100, 300, 10, 100, 10000, 2000, 700, 1000),
+	Entry("should scale up with ScaleupStepSize when currentPU is lower than 1000 3", 100, 800, 10, 100, 10000, 2000, 700, 2000),
+	Entry("should scale up with ScaleupStepSize when currentPU is equal to 1000 1", 100, 1000, 50, 100, 10000, 2000, 700, 2000),
+	Entry("should scale up with ScaleupStepSize when currentPU is equal to 1000 2", 100, 1000, 30, 100, 10000, 2000, 2000, 3000),
+	Entry("should scale up with ScaleupStepSize when currentPU is equal to 1000 3", 100, 1000, 10, 100, 10000, 2000, 3000, 4000),
+	Entry("should scale up with ScaleupStepSize when currentPU is more than 1000 1", 30, 2000, 20, 100, 10000, 2000, 700, 3000),
+	Entry("should scale up with ScaleupStepSize when currentPU is more than 1000 2", 20, 2000, 10, 100, 10000, 2000, 2000, 4000),
+	Entry("should scale up with ScaleupStepSize when currentPU is more than 1000 3", 100, 2000, 10, 100, 10000, 2000, 3000, 5000),
 )
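Two of the new entries can be hand-checked from the clamp logic alone, since the CPU-derived raw target (computed by code outside this diff) is well above the cap in both cases. In "currentPU is more than 1000 1", the 700 step is widened to 1000 because 2000 + 700 crosses the 1000-PU boundary, capping the result at 2000 + 1000 = 3000, the expected value. In "currentPU is lower than 1000 3", the widened step caps the target at 800 + 1000 = 1800, which is then rounded up to the expected 2000.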
 
 var _ = Describe("Fetch Credentials", func() {
@@ -357,56 +396,19 @@
 	})
 })
 
-var _ = Describe("Get and overwrite scaledown interval", func() {
-	var testReconciler *SpannerAutoscalerReconciler
-	controllerScaleDownInterval := 55 * time.Minute
+var _ = Describe("Get and overwrite interval", func() {
+	defaultInterval := 55 * time.Minute
 
-	BeforeEach(func() {
-		By("Creating a test reconciler")
-		testReconciler = &SpannerAutoscalerReconciler{
-			scaleDownInterval: controllerScaleDownInterval,
-			clock:             testingclock.NewFakeClock(fakeTime),
-			log:               logr.Discard(),
-		}
-	})
-
-	It("should get controller default scaledown interval", func() {
-		want := controllerScaleDownInterval
-		sa := &spannerv1beta1.SpannerAutoscaler{
-			Status: spannerv1beta1.SpannerAutoscalerStatus{
-				LastScaleTime:          metav1.Time{Time: fakeTime.Add(-time.Minute)},
-				CurrentProcessingUnits: 2000,
-				DesiredProcessingUnits: 1000,
-				InstanceState:          spannerv1beta1.InstanceStateReady,
-			},
-			Spec: spannerv1beta1.SpannerAutoscalerSpec{
-				ScaleConfig: spannerv1beta1.ScaleConfig{},
-			},
-		}
-		got := getOrConvertTimeDuration(sa.Spec.ScaleConfig.ScaledownInterval, testReconciler.scaleDownInterval)
+	It("should get defaultInterval if customInterval == nil", func() {
+		want := defaultInterval
+		got := getOrConvertTimeDuration(nil, defaultInterval)
 		Expect(got).To(Equal(want))
 	})
 
-	It("should override default scaledown interval with custom SpannerAutoscaler configuration value", func() {
+	It("should override defaultInterval with customInterval.Duration if customInterval != nil", func() {
 		want := 20 * time.Minute
-		scaledownInterval := metav1.Duration{
-			Duration: want,
-		}
-		sa := &spannerv1beta1.SpannerAutoscaler{
-			Status: spannerv1beta1.SpannerAutoscalerStatus{
-				LastScaleTime:          metav1.Time{Time: fakeTime.Add(-time.Minute)},
-				CurrentProcessingUnits: 2000,
-				DesiredProcessingUnits: 1000,
-				InstanceState:          spannerv1beta1.InstanceStateReady,
-			},
-			Spec: spannerv1beta1.SpannerAutoscalerSpec{
-				ScaleConfig: spannerv1beta1.ScaleConfig{
-					ScaledownInterval: &scaledownInterval,
-				},
-			},
-		}
-
-		got := getOrConvertTimeDuration(sa.Spec.ScaleConfig.ScaledownInterval, testReconciler.scaleDownInterval)
+		customInterval := &metav1.Duration{Duration: want}
+		got := getOrConvertTimeDuration(customInterval, defaultInterval)
 		Expect(got).To(Equal(want))
 	})
 })
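`getOrConvertTimeDuration` itself is not touched by this diff, but the rewritten tests pin down its contract: return the CR-level override when it is set, otherwise fall back to the controller-wide default. A sketch consistent with these two cases (the actual implementation lives elsewhere in the controller package and may differ):

```go
package controller

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// getOrConvertTimeDuration, as exercised above: a non-nil customInterval
// (e.g. spec.scaleConfig.scaleupInterval) wins; nil falls back to the
// controller default (e.g. the --scale-up-interval flag value).
func getOrConvertTimeDuration(customInterval *metav1.Duration, defaultDuration time.Duration) time.Duration {
	if customInterval != nil {
		return customInterval.Duration
	}
	return defaultDuration
}
```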