Automatic switch to emergency mode when metrics unavailable #424

Open: wants to merge 37 commits into base: main

Commits (37)
438f653
upgrade kubebuilder to plugin/v4
randytqwjp Oct 10, 2024
df7b7b8
add test utils
randytqwjp Oct 10, 2024
52780ef
fix controller test
randytqwjp Oct 22, 2024
9c153b5
fix gha test
randytqwjp Oct 22, 2024
f4454f4
chmod tortoisectl test
randytqwjp Oct 22, 2024
bba0747
edit tortoisectl
randytqwjp Oct 23, 2024
a968c7a
fix lint
randytqwjp Oct 23, 2024
691c4f4
fix lint
randytqwjp Oct 23, 2024
e28d881
add lint-fix to ci
randytqwjp Oct 23, 2024
192fecf
go mod tidy
randytqwjp Oct 23, 2024
4343ea9
add make dependencies
randytqwjp Oct 23, 2024
93556df
remove lint-fix
randytqwjp Oct 23, 2024
5cda06c
upgrade tools
randytqwjp Oct 23, 2024
0fbfff5
lint-fix
randytqwjp Oct 23, 2024
ae2334a
add tool chain version
randytqwjp Oct 23, 2024
28080e3
change toolchain to 1.22
randytqwjp Oct 23, 2024
ca36611
add timeout
randytqwjp Oct 23, 2024
5677abc
remove lint-fix
randytqwjp Oct 23, 2024
a6b0318
edit licenses
randytqwjp Oct 29, 2024
e8454e9
remove chmod
randytqwjp Nov 1, 2024
3725788
Merge branch 'main' of github.com:mercari/tortoise into kubebuilder-i…
randytqwjp Nov 26, 2024
ba4351e
automatic emergency mode trigger when kube metrics unavailable for hpa
randytqwjp Nov 26, 2024
11d8cb8
add return statement
randytqwjp Nov 26, 2024
c056d82
clean up code
randytqwjp Nov 28, 2024
b47578c
clean up code
randytqwjp Nov 28, 2024
7d07efd
add hpa test and try to fix controller test
randytqwjp Dec 3, 2024
12c7b05
fix old controller tests
randytqwjp Dec 4, 2024
ed22342
add controller test and fix checkHPAStatus function
randytqwjp Dec 6, 2024
bddb139
clean up code
randytqwjp Dec 6, 2024
0dc2749
remove autoemergency phase and use emergency instead
randytqwjp Dec 6, 2024
dd7c10a
fix lint
randytqwjp Dec 6, 2024
2e261c4
refactor tortoisephase change into tortoise service and write unit tests
randytqwjp Dec 12, 2024
7c0e997
fix lint
randytqwjp Dec 13, 2024
5be0c1a
fix lint
randytqwjp Dec 13, 2024
d8ae58d
fix review comments
randytqwjp Dec 19, 2024
46d71a9
fix nits
randytqwjp Jan 9, 2025
e41aab4
fix nits
randytqwjp Jan 9, 2025
@@ -8,8 +8,7 @@ spec:
strategy: {}
template:
metadata:
annotations:
kubectl.kubernetes.io/restartedAt: "2023-01-01T00:00:00Z"
annotations: null
creationTimestamp: null
labels:
app: mercari
@@ -20,110 +20,97 @@ status:
cpu: Vertical
memory: Vertical
conditions:
containerResourceRequests:
- containerName: app
resource:
cpu: "10"
memory: 10Gi
- containerName: istio-proxy
resource:
cpu: "4"
memory: 4Gi
tortoiseConditions:
- lastTransitionTime: "2023-01-01T00:00:00Z"
lastUpdateTime: "2023-01-01T00:00:00Z"
status: "False"
type: FailedToReconcile
containerRecommendationFromVPA:
- containerName: app
maxRecommendation:
cpu:
quantity: "3"
updatedAt: "2023-01-01T00:00:00Z"
quantity: "10"
updatedAt: null
memory:
quantity: 3Gi
updatedAt: "2023-01-01T00:00:00Z"
quantity: 10Gi
updatedAt: null
recommendation:
cpu:
quantity: "3"
updatedAt: "2023-01-01T00:00:00Z"
quantity: "10"
updatedAt: null
memory:
quantity: 3Gi
updatedAt: "2023-01-01T00:00:00Z"
quantity: 10Gi
updatedAt: null
- containerName: istio-proxy
maxRecommendation:
cpu:
quantity: "3"
updatedAt: "2023-01-01T00:00:00Z"
quantity: "4"
updatedAt: null
memory:
quantity: 3Gi
updatedAt: "2023-01-01T00:00:00Z"
quantity: 4Gi
updatedAt: null
recommendation:
cpu:
quantity: "3"
updatedAt: "2023-01-01T00:00:00Z"
quantity: "4"
updatedAt: null
memory:
quantity: 3Gi
updatedAt: "2023-01-01T00:00:00Z"
containerResourceRequests:
- containerName: app
resource:
cpu: "10"
memory: 3Gi
- containerName: istio-proxy
resource:
cpu: "3"
memory: 3Gi
tortoiseConditions:
- lastTransitionTime: "2023-01-01T00:00:00Z"
lastUpdateTime: "2023-01-01T00:00:00Z"
message: the current number of replicas is not bigger than the preferred max
replica number
reason: ScaledUpBasedOnPreferredMaxReplicas
status: "False"
type: ScaledUpBasedOnPreferredMaxReplicas
- lastTransitionTime: "2023-01-01T00:00:00Z"
lastUpdateTime: "2023-01-01T00:00:00Z"
message: The recommendation is provided
status: "True"
type: VerticalRecommendationUpdated
- lastTransitionTime: "2023-01-01T00:00:00Z"
lastUpdateTime: "2023-01-01T00:00:00Z"
status: "False"
type: FailedToReconcile
containerResourcePhases:
- containerName: app
resourcePhases:
cpu:
lastTransitionTime: "2023-01-01T00:00:00Z"
phase: GatheringData
memory:
lastTransitionTime: "2023-01-01T00:00:00Z"
phase: Working
- containerName: istio-proxy
resourcePhases:
cpu:
lastTransitionTime: "2023-01-01T00:00:00Z"
phase: Working
memory:
lastTransitionTime: "2023-01-01T00:00:00Z"
phase: Working
quantity: 4Gi
updatedAt: null
recommendations:
horizontal:
maxReplicas:
- from: 0
timezone: Local
to: 24
updatedAt: "2023-01-01T00:00:00Z"
updatedAt: "2023-10-06T01:15:47Z"
value: 20
minReplicas:
- from: 0
timezone: Local
to: 24
updatedAt: "2023-01-01T00:00:00Z"
updatedAt: "2023-10-06T01:15:47Z"
value: 5
targetUtilizations:
- containerName: app
targetUtilization:
cpu: 70
targetUtilization: {}
- containerName: istio-proxy
targetUtilization: {}
vertical:
containerResourceRecommendation:
- RecommendedResource:
cpu: "10"
memory: 3Gi
memory: 10Gi
containerName: app
- RecommendedResource:
cpu: "3"
memory: 3Gi
cpu: "4"
memory: 4Gi
containerName: istio-proxy
containerResourcePhases:
- containerName: app
resourcePhases:
cpu:
lastTransitionTime: "2023-01-01T00:00:00Z"
phase: GatheringData
memory:
lastTransitionTime: "2023-01-01T00:00:00Z"
phase: Working
- containerName: istio-proxy
resourcePhases:
cpu:
lastTransitionTime: "2023-01-01T00:00:00Z"
phase: Working
memory:
lastTransitionTime: "2023-01-01T00:00:00Z"
phase: Working
targets:
horizontalPodAutoscaler: tortoise-hpa-mercari
scaleTargetRef:
@@ -132,4 +119,4 @@ status:
verticalPodAutoscalers:
- name: tortoise-monitor-mercari
role: Monitor
tortoisePhase: Working
tortoisePhase: PartlyWorking
21 changes: 11 additions & 10 deletions internal/controller/tortoise_controller.go
@@ -90,7 +90,6 @@ var (
func (r *TortoiseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) {
logger := log.FromContext(ctx)
now := time.Now()
hpaCreated := false
if onlyTestNow != nil {
now = *onlyTestNow
}
@@ -204,7 +203,7 @@ func (r *TortoiseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_
return ctrl.Result{}, fmt.Errorf("add finalizer: %w", err)
}

tortoise, hpaCreated, err = r.HpaService.UpdateHPASpecFromTortoiseAutoscalingPolicy(ctx, tortoise, hpa, currentDesiredReplicaNum, now)
tortoise, err = r.HpaService.UpdateHPASpecFromTortoiseAutoscalingPolicy(ctx, tortoise, hpa, currentDesiredReplicaNum, now)
if err != nil {
logger.Error(err, "update HPA spec from Tortoise autoscaling policy", "tortoise", req.NamespacedName)
return ctrl.Result{}, err
@@ -234,20 +233,22 @@ func (r *TortoiseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_
// VPA is ready, we mark all Vertical scaling resources as Running.
tortoise = vpa.SetAllVerticalContainerResourcePhaseWorking(tortoise, now)

isReady := false
Collaborator:

nit: we don't need to define this here?

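For context on this nit, here is a minimal, self-contained Go sketch (stand-in names, not the PR's actual code) showing that a short variable declaration can introduce isReady at the call site, so a separate isReady := false line is unnecessary as long as the call happens in the same scope:

package main

import "fmt"

// fetchHPA stands in for HpaService.GetHPAOnTortoise; the return values
// here are placeholders, not the real types used in this PR.
func fetchHPA() (string, bool, error) {
	return "tortoise-hpa-mercari", true, nil
}

func main() {
	// hpa and err already exist, as they do earlier in Reconcile.
	var hpa string
	var err error

	// := is allowed here because at least one variable on the left
	// (isReady) is new; hpa and err are simply reassigned. Note this
	// only holds in the same scope as their original declarations,
	// otherwise := would shadow them.
	hpa, isReady, err := fetchHPA()
	fmt.Println(hpa, isReady, err)
}
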
logger.Info("VPA created by tortoise is ready, proceeding to generate the recommendation", "tortoise", req.NamespacedName)
hpa, err = r.HpaService.GetHPAOnTortoise(ctx, tortoise)
hpa, isReady, err = r.HpaService.GetHPAOnTortoise(ctx, tortoise)
if err != nil {
logger.Error(err, "failed to get HPA", "tortoise", req.NamespacedName)
return ctrl.Result{}, err
}
scalingActive, err := r.HpaService.CheckHpaMetricStatus(ctx, hpa)
if err != nil {
if tortoise.Status.TortoisePhase == autoscalingv1beta3.TortoisePhaseWorking && !hpaCreated {
logger.Error(err, "HPA status abnormal", "tortoise", req.NamespacedName)
return ctrl.Result{}, err
}
err = nil

if !isReady {
// HPA is correctly fetched, but looks like not ready yet. We won't be able to calculate things correctly, and hence stop the reconciliation here.
logger.Info("HPA not ready, abort reconcile")
return ctrl.Result{}, nil
}
scalingActive := r.HpaService.CheckHpaMetricStatus(ctx, hpa)

tortoise, err = r.TortoiseService.UpdateTortoisePhaseIfHPAIsUnhealthy(ctx, scalingActive, tortoise)
if err != nil {
logger.Error(err, "Tortoise could not switch to emergency mode", "tortoise", req.NamespacedName)
59 changes: 27 additions & 32 deletions pkg/hpa/service.go
@@ -286,19 +286,23 @@ func (c *Service) GetHPAOnTortoiseSpec(ctx context.Context, tortoise *autoscalin
return hpa, nil
}

func (c *Service) GetHPAOnTortoise(ctx context.Context, tortoise *autoscalingv1beta3.Tortoise) (*v2.HorizontalPodAutoscaler, error) {
func (c *Service) GetHPAOnTortoise(ctx context.Context, tortoise *autoscalingv1beta3.Tortoise) (*v2.HorizontalPodAutoscaler, bool, error) {
if !HasHorizontal(tortoise) {
// there should be no HPA
return nil, nil
return nil, true, nil
}
hpa := &v2.HorizontalPodAutoscaler{}
if err := c.c.Get(ctx, types.NamespacedName{Namespace: tortoise.Namespace, Name: tortoise.Status.Targets.HorizontalPodAutoscaler}, hpa); err != nil {
return nil, fmt.Errorf("failed to get hpa on tortoise: %w", err)
return nil, false, fmt.Errorf("failed to get hpa on tortoise: %w", err)
}
if reflect.DeepEqual(hpa.Status, v2.HorizontalPodAutoscalerStatus{}) || hpa.Status.Conditions == nil || hpa.Status.CurrentMetrics == nil {
// Most likely, HPA is just created and not yet handled by HPA controller.
return nil, false, nil
}

recordHPAMetric(ctx, tortoise, hpa)

return hpa, nil
return hpa, true, nil
}

func (s *Service) UpdatingHPATargetUtilizationAllowed(tortoise *autoscalingv1beta3.Tortoise, now time.Time) (*autoscalingv1beta3.Tortoise, bool) {
@@ -498,33 +502,32 @@ func (c *Service) UpdateHPASpecFromTortoiseAutoscalingPolicy(
givenHPA *v2.HorizontalPodAutoscaler,
replicaNum int32,
now time.Time,
) (*autoscalingv1beta3.Tortoise, bool, error) {
hpaCreated := false
) (*autoscalingv1beta3.Tortoise, error) {
if tortoise.Spec.UpdateMode == autoscalingv1beta3.UpdateModeOff {
// When UpdateMode is Off, we don't update HPA.
return tortoise, hpaCreated, nil
return tortoise, nil
}

if !HasHorizontal(tortoise) {
if tortoise.Spec.TargetRefs.HorizontalPodAutoscalerName == nil {
// HPA should be created by Tortoise, which can be deleted.
err := c.DeleteHPACreatedByTortoise(ctx, tortoise)
if err != nil && !apierrors.IsNotFound(err) {
return tortoise, hpaCreated, fmt.Errorf("delete hpa created by tortoise: %w", err)
return tortoise, fmt.Errorf("delete hpa created by tortoise: %w", err)
}
c.recorder.Event(tortoise, corev1.EventTypeNormal, event.HPADeleted, fmt.Sprintf("Deleted a HPA %s/%s because tortoise has no resource to scale horizontally", tortoise.Namespace, tortoise.Status.Targets.HorizontalPodAutoscaler))
} else {
// We cannot delete the HPA because it's specified by the user.
err := c.disableHPA(ctx, tortoise, replicaNum)
if err != nil {
return tortoise, hpaCreated, fmt.Errorf("disable hpa: %w", err)
return tortoise, fmt.Errorf("disable hpa: %w", err)
}
c.recorder.Event(tortoise, corev1.EventTypeNormal, event.HPADisabled, fmt.Sprintf("Disabled a HPA %s/%s because tortoise has no resource to scale horizontally", tortoise.Namespace, tortoise.Status.Targets.HorizontalPodAutoscaler))
}

// No need to edit container resource phase.

return tortoise, hpaCreated, nil
return tortoise, nil
}

hpa := &v2.HorizontalPodAutoscaler{}
@@ -536,23 +539,22 @@ func (c *Service) UpdateHPASpecFromTortoiseAutoscalingPolicy(
// - In that case, we need to create an initial HPA or give an annotation to existing HPA.
tortoise, err = c.InitializeHPA(ctx, tortoise, replicaNum, now)
if err != nil {
return tortoise, hpaCreated, fmt.Errorf("initialize hpa: %w", err)
return tortoise, fmt.Errorf("initialize hpa: %w", err)
}
hpaCreated = true

c.recorder.Event(tortoise, corev1.EventTypeNormal, event.HPACreated, fmt.Sprintf("Initialized a HPA %s/%s because tortoise has resource to scale horizontally", tortoise.Namespace, tortoise.Status.Targets.HorizontalPodAutoscaler))
return tortoise, hpaCreated, nil
return tortoise, nil
}

return tortoise, hpaCreated, fmt.Errorf("failed to get hpa on tortoise: %w", err)
return tortoise, fmt.Errorf("failed to get hpa on tortoise: %w", err)
}

var newhpa *v2.HorizontalPodAutoscaler
var isHpaEdited bool
newhpa, tortoise, isHpaEdited = c.syncHPAMetricsWithTortoiseAutoscalingPolicy(ctx, tortoise, hpa, now)
if !isHpaEdited {
// User didn't change anything.
return tortoise, hpaCreated, nil
return tortoise, nil
}

retryNumber := -1
@@ -571,12 +573,12 @@ func (c *Service) UpdateHPASpecFromTortoiseAutoscalingPolicy(
}

if err := retry.RetryOnConflict(retry.DefaultRetry, updateFn); err != nil {
return tortoise, hpaCreated, fmt.Errorf("update hpa: %w (%v times retried)", err, replicaNum)
return tortoise, fmt.Errorf("update hpa: %w (%v times retried)", err, replicaNum)
}

c.recorder.Event(tortoise, corev1.EventTypeNormal, event.HPAUpdated, fmt.Sprintf("Updated a HPA %s/%s because the autoscaling policy is changed in the tortoise", tortoise.Namespace, tortoise.Status.Targets.HorizontalPodAutoscaler))

return tortoise, hpaCreated, nil
return tortoise, nil
}

func HasHorizontal(tortoise *autoscalingv1beta3.Tortoise) bool {
@@ -769,25 +771,18 @@ func (c *Service) excludeExternalMetric(ctx context.Context, hpa *v2.HorizontalP
return newHPA
}

func (c *Service) CheckHpaMetricStatus(ctx context.Context, currenthpa *v2.HorizontalPodAutoscaler) (bool, error) {
func (c *Service) CheckHpaMetricStatus(ctx context.Context, currenthpa *v2.HorizontalPodAutoscaler) bool {
Collaborator:

nit

Suggested change:
- func (c *Service) CheckHpaMetricStatus(ctx context.Context, currenthpa *v2.HorizontalPodAutoscaler) bool {
+ func (c *Service) IsHpaMetricAvailable(ctx context.Context, currenthpa *v2.HorizontalPodAutoscaler) bool {

Collaborator:

Actually, can we do it like this instead? It would eliminate the logic from the controller layer.

func (c *Service) UpdateTortoisePhaseIfHPAIsUnhealthy(ctx context.Context, currenthpa *v2.HorizontalPodAutoscaler, tortoise *v1beta1.Tortoise) error {
...

			if condition.Type == "ScalingActive" && condition.Status == "False" && condition.Reason == "FailedGetResourceMetric" {
				//switch to Emergency mode since no metrics
				logger.Info("HPA failed to get resource metrics, switch to emergency mode")
				tortoise.Status.TortoisePhase = v1beta3.TortoisePhaseEmergency
				return nil
			}

}

Then it would probably make more sense to move this inside the tortoise service, instead of the hpa service.

Collaborator Author:

Won't this add extra responsibilities to the tortoise service if it has to manage HPA conditions as well? Would it be better to add an UpdateTortoisePhaseIfHPAIsUnhealthy function in the tortoise service but still perform the check in the hpa service, so we only pass the scalingActive bool into the tortoise service instead of the entire HPA? I'm not sure which will be cleaner.

Collaborator:

> Won't this add extra responsibilities to the tortoise service if it has to manage HPA conditions as well?

Hmm, that's understandable. So,

> Would it be better to add an UpdateTortoisePhaseIfHPAIsUnhealthy function in the tortoise service but still perform the check in the hpa service, so we only pass the scalingActive bool into the tortoise service instead of the entire HPA?

this direction looks good.

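Following up on the direction agreed above, here is a rough, self-contained sketch of that split, using simplified stand-in types rather than the real autoscalingv1beta3 API or the existing service structs: the hpa service keeps computing a scalingActive bool from the HPA status, and the tortoise service only consumes that bool when deciding whether to flip the phase.

package tortoise

// Simplified stand-in types; the real code uses the autoscalingv1beta3 API
// and the existing tortoise Service, so treat this purely as a sketch of
// the agreed responsibility split.

type TortoisePhase string

const (
	TortoisePhaseWorking   TortoisePhase = "Working"
	TortoisePhaseEmergency TortoisePhase = "Emergency"
)

type Tortoise struct {
	Status struct {
		TortoisePhase TortoisePhase
	}
}

type Service struct{}

// UpdateTortoisePhaseIfHPAIsUnhealthy only receives the scalingActive bool
// computed by the hpa service, so the tortoise service never inspects HPA
// conditions itself.
func (s *Service) UpdateTortoisePhaseIfHPAIsUnhealthy(scalingActive bool, t *Tortoise) *Tortoise {
	if !scalingActive && t.Status.TortoisePhase == TortoisePhaseWorking {
		// No usable metrics: fall back to emergency mode, as the
		// reviewer's snippet above suggests.
		t.Status.TortoisePhase = TortoisePhaseEmergency
	}
	return t
}

In this shape, HPA condition parsing stays entirely inside pkg/hpa, which matches the controller call r.TortoiseService.UpdateTortoisePhaseIfHPAIsUnhealthy(ctx, scalingActive, tortoise) shown in the controller diff above.
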
//currenthpa = currenthpa.DeepCopy()
logger := log.FromContext(ctx)
if currenthpa == nil {
logger.Info("empty HPA passed into status check, ignore")
return true, nil
}

if reflect.DeepEqual(currenthpa.Status, v2.HorizontalPodAutoscalerStatus{}) {
return false, fmt.Errorf("HPA empty status, switch to emergency mode")
return true
}

if currenthpa.Status.Conditions == nil {
return false, fmt.Errorf("HPA empty conditions, switch to emergency mode")
if reflect.DeepEqual(currenthpa.Status, v2.HorizontalPodAutoscalerStatus{}) || currenthpa.Status.Conditions == nil || currenthpa.Status.CurrentMetrics == nil {
return true
}

if currenthpa.Status.CurrentMetrics == nil {
return false, fmt.Errorf("HPA no metrics, switch to emergency mode")
}
conditions := currenthpa.Status.Conditions
currentMetrics := currenthpa.Status.CurrentMetrics

@@ -796,7 +791,7 @@ func (c *Service) CheckHpaMetricStatus(ctx context.Context, currenthpa *v2.Horiz
if condition.Type == "ScalingActive" && condition.Status == "False" && condition.Reason == "FailedGetResourceMetric" {
//switch to Emergency mode since no metrics
logger.Info("HPA failed to get resource metrics, switch to emergency mode")
return false, nil
return false
}
}
}
@@ -805,14 +800,14 @@ func (c *Service) CheckHpaMetricStatus(ctx context.Context, currenthpa *v2.Horiz
for _, currentMetric := range currentMetrics {
if !currentMetric.ContainerResource.Current.Value.IsZero() {
//Can still get metrics for some containers, scale based on those
return true, nil
return true
}
}
logger.Info("HPA all metrics return 0, switch to emergency mode")
return false, nil
return false
}

logger.Info("HPA status check passed")

return true, nil
return true
}
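
To illustrate the new bool-returning contract, here is a hedged unit-test sketch; the import path github.com/mercari/tortoise/pkg/hpa and the usability of a zero-value Service are assumptions, though the method as shown only reads the HPA status, so no client or recorder should be required.

package hpa_test

import (
	"context"
	"testing"

	v2 "k8s.io/api/autoscaling/v2"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"

	"github.com/mercari/tortoise/pkg/hpa" // assumed import path
)

func TestCheckHpaMetricStatusReturnsFalseWhenMetricsUnavailable(t *testing.T) {
	s := &hpa.Service{} // assumption: the zero value is enough for this read-only check
	q := resource.MustParse("0")

	h := &v2.HorizontalPodAutoscaler{
		Status: v2.HorizontalPodAutoscalerStatus{
			Conditions: []v2.HorizontalPodAutoscalerCondition{{
				Type:   v2.ScalingActive,
				Status: corev1.ConditionFalse,
				Reason: "FailedGetResourceMetric",
			}},
			CurrentMetrics: []v2.MetricStatus{{
				Type: v2.ContainerResourceMetricSourceType,
				ContainerResource: &v2.ContainerResourceMetricStatus{
					Name:      corev1.ResourceCPU,
					Container: "app",
					Current:   v2.MetricValueStatus{Value: &q},
				},
			}},
		},
	}

	// ScalingActive=False with reason FailedGetResourceMetric should be
	// treated as "metrics unavailable", i.e. scalingActive == false.
	if s.CheckHpaMetricStatus(context.Background(), h) {
		t.Error("expected CheckHpaMetricStatus to return false when the HPA cannot fetch metrics")
	}
}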