From 07f9053fe3a6a2c0f468a12842c4f51c58d998cb Mon Sep 17 00:00:00 2001 From: jayson wang Date: Sun, 1 Dec 2024 17:05:34 +0800 Subject: [PATCH] dynamically check if a resource is scalable --- internal/dao/registry.go | 86 ++++++++++++++------------------- internal/dao/scalable.go | 6 ++- internal/dao/types.go | 5 +- internal/view/command.go | 10 ++-- internal/view/scale_extender.go | 31 +++++++----- internal/watch/factory.go | 15 ++++-- 6 files changed, 79 insertions(+), 74 deletions(-) diff --git a/internal/dao/registry.go b/internal/dao/registry.go index e65858e65d..3a2d6d3d42 100644 --- a/internal/dao/registry.go +++ b/internal/dao/registry.go @@ -5,17 +5,20 @@ package dao import ( "fmt" + "slices" "sort" "strings" "sync" - "github.com/derailed/k9s/internal/client" "github.com/rs/zerolog/log" + apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/derailed/k9s/internal/client" ) const ( @@ -95,10 +98,7 @@ func AccessorFor(f Factory, gvr client.GVR) (Accessor, error) { r, ok := m[gvr] if !ok { - r = new(Generic) - if MetaAccess.IsScalable(gvr) { - r = new(Scaler) - } + r = new(Scaler) log.Debug().Msgf("No DAO registry entry for %q. Using generics!", gvr) } r.Init(f, gvr) @@ -144,12 +144,7 @@ func (m *Meta) GVK2GVR(gv schema.GroupVersion, kind string) (client.GVR, bool, b // IsCRD checks if resource represents a CRD func IsCRD(r metav1.APIResource) bool { - for _, c := range r.Categories { - if c == crdCat { - return true - } - } - return false + return slices.Contains(r.Categories, crdCat) } // MetaFor returns a resource metadata for a given gvr. @@ -166,24 +161,19 @@ func (m *Meta) MetaFor(gvr client.GVR) (metav1.APIResource, error) { // IsK8sMeta checks for non resource meta. 
func IsK8sMeta(m metav1.APIResource) bool { - for _, c := range m.Categories { - if c == k9sCat || c == helmCat { - return false - } - } - - return true + return !slices.ContainsFunc(m.Categories, func(category string) bool { + return category == k9sCat || category == helmCat + }) } // IsK9sMeta checks for non resource meta. func IsK9sMeta(m metav1.APIResource) bool { - for _, c := range m.Categories { - if c == k9sCat { - return true - } - } + return slices.Contains(m.Categories, k9sCat) +} - return false +// IsScalable checks if the resource can be scaled +func IsScalable(m metav1.APIResource) bool { + return slices.Contains(m.Categories, scaleCat) } // LoadResources hydrates server preferred+CRDs resource metadata. @@ -196,22 +186,12 @@ func (m *Meta) LoadResources(f Factory) error { return err } loadNonResource(m.resMetas) - loadCRDs(f, m.resMetas) - return nil -} - -// IsScalable check if the resource can be scaled -func (m *Meta) IsScalable(gvr client.GVR) bool { - if meta, ok := m.resMetas[gvr]; ok { - for _, c := range meta.Categories { - if c == scaleCat { - return true - } - } - } + // We've actually loaded all the CRDs in loadPreferred, and we're now adding + some additional CRD properties on top of that. + go m.loadCRDs(f) - return false + return nil } // BOZO!! Need countermeasures for direct commands! @@ -419,11 +399,16 @@ func isDeprecated(gvr client.GVR) bool { return ok } -func loadCRDs(f Factory, m ResourceMetas) { +// loadCRDs waits for the CRD cache to sync and then adds some additional properties to each CRD. 
+func (m *Meta) loadCRDs(f Factory) { if f.Client() == nil || !f.Client().ConnectionOK() { return } - oo, err := f.List(crdGVR, client.ClusterScope, false, labels.Everything()) + + // we must block until all CRD caches have synced + f.WaitForCacheSync() + + oo, err := f.List(crdGVR, client.ClusterScope, true, labels.Everything()) if err != nil { log.Warn().Err(err).Msgf("Fail CRDs load") return } @@ -440,25 +425,28 @@ var meta metav1.APIResource meta.Kind = crd.Spec.Names.Kind meta.Group = crd.Spec.Group - meta.Name = crd.Name + meta.Name = crd.Spec.Names.Plural meta.SingularName = crd.Spec.Names.Singular meta.ShortNames = crd.Spec.Names.ShortNames meta.Namespaced = crd.Spec.Scope == apiext.NamespaceScoped for _, v := range crd.Spec.Versions { if v.Served && !v.Deprecated { + if !slices.Contains(meta.Categories, scaleCat) { + meta.Categories = append(meta.Categories, scaleCat) + } + meta.Version = v.Name break } } - // meta, errs := extractMeta(o) - // if len(errs) > 0 { - // log.Error().Err(errs[0]).Msgf("Fail to extract CRD meta (%d) errors", len(errs)) - // continue - // } - meta.Categories = append(meta.Categories, crdCat) - gvr := client.NewGVRFromMeta(meta) - m[gvr] = meta + if !slices.Contains(meta.Categories, crdCat) { + meta.Categories = append(meta.Categories, crdCat) + } + + m.mx.Lock() + m.resMetas[client.NewGVRFromMeta(meta)] = meta + m.mx.Unlock() } } diff --git a/internal/dao/scalable.go b/internal/dao/scalable.go index 6c13f45d3c..7664b5355d 100644 --- a/internal/dao/scalable.go +++ b/internal/dao/scalable.go @@ -1,14 +1,18 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of K9s + package dao import ( "context" - "github.com/derailed/k9s/internal/client" "github.com/rs/zerolog/log" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/dynamic" "k8s.io/client-go/restmapper" "k8s.io/client-go/scale" + + "github.com/derailed/k9s/internal/client" ) var _ Scalable = (*Scaler)(nil) diff 
--git a/internal/dao/types.go b/internal/dao/types.go index 014c668e4c..8e7602f294 100644 --- a/internal/dao/types.go +++ b/internal/dao/types.go @@ -8,14 +8,15 @@ import ( "io" "time" - "github.com/derailed/k9s/internal/client" - "github.com/derailed/k9s/internal/watch" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/informers" restclient "k8s.io/client-go/rest" + + "github.com/derailed/k9s/internal/client" + "github.com/derailed/k9s/internal/watch" ) // ResourceMetas represents a collection of resource metadata. diff --git a/internal/view/command.go b/internal/view/command.go index abdf09ab26..bf42956eb4 100644 --- a/internal/view/command.go +++ b/internal/view/command.go @@ -11,11 +11,12 @@ import ( "strings" "sync" + "github.com/rs/zerolog/log" + "github.com/derailed/k9s/internal/client" "github.com/derailed/k9s/internal/dao" "github.com/derailed/k9s/internal/model" "github.com/derailed/k9s/internal/view/cmd" - "github.com/rs/zerolog/log" ) var ( @@ -288,12 +289,7 @@ func (c *Command) viewMetaFor(p *cmd.Interpreter) (client.GVR, *MetaViewer, erro v := MetaViewer{ viewerFn: func(gvr client.GVR) ResourceViewer { - viewer := NewOwnerExtender(NewBrowser(gvr)) - if dao.MetaAccess.IsScalable(gvr) { - viewer = NewScaleExtender(viewer) - } - - return viewer + return NewScaleExtender(NewOwnerExtender(NewBrowser(gvr))) }, } if mv, ok := customViewers[gvr]; ok { diff --git a/internal/view/scale_extender.go b/internal/view/scale_extender.go index 45fddaf9ba..922ca1000a 100644 --- a/internal/view/scale_extender.go +++ b/internal/view/scale_extender.go @@ -9,13 +9,13 @@ import ( "strconv" "strings" - "github.com/derailed/k9s/internal/config" - - "github.com/derailed/k9s/internal/dao" - "github.com/derailed/k9s/internal/ui" "github.com/derailed/tcell/v2" "github.com/derailed/tview" "github.com/rs/zerolog/log" + + "github.com/derailed/k9s/internal/config" + 
"github.com/derailed/k9s/internal/dao" + "github.com/derailed/k9s/internal/ui" ) // ScaleExtender adds scaling extensions. @@ -35,12 +35,21 @@ func (s *ScaleExtender) bindKeys(aa *ui.KeyActions) { if s.App().Config.K9s.IsReadOnly() { return } - aa.Add(ui.KeyS, ui.NewKeyActionWithOpts("Scale", s.scaleCmd, - ui.ActionOpts{ - Visible: true, - Dangerous: true, - }, - )) + + meta, err := dao.MetaAccess.MetaFor(s.GVR()) + if err != nil { + log.Error().Err(err).Msgf("Unable to retrieve meta information for %s", s.GVR()) + return + } + + if dao.IsK9sMeta(meta) || dao.IsScalable(meta) { + aa.Add(ui.KeyS, ui.NewKeyActionWithOpts("Scale", s.scaleCmd, + ui.ActionOpts{ + Visible: true, + Dangerous: true, + }, + )) + } } func (s *ScaleExtender) scaleCmd(evt *tcell.EventKey) *tcell.EventKey { @@ -127,7 +136,7 @@ func (s *ScaleExtender) makeScaleForm(sels []string) (*tview.Form, error) { if len(sels) == 1 { // If the CRD resource supports scaling, then first try to // read the replicas directly from the CRD. - if dao.MetaAccess.IsScalable(s.GVR()) { + if meta, _ := dao.MetaAccess.MetaFor(s.GVR()); dao.IsScalable(meta) { replicas, err := s.replicasFromScaleSubresource(sels[0]) if err == nil && len(replicas) != 0 { factor = replicas diff --git a/internal/watch/factory.go b/internal/watch/factory.go index 698b17a458..e1a7df7a48 100644 --- a/internal/watch/factory.go +++ b/internal/watch/factory.go @@ -9,7 +9,6 @@ import ( "sync" "time" - "github.com/derailed/k9s/internal/client" "github.com/rs/zerolog/log" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -17,10 +16,13 @@ import ( "k8s.io/apimachinery/pkg/runtime" di "k8s.io/client-go/dynamic/dynamicinformer" "k8s.io/client-go/informers" + + "github.com/derailed/k9s/internal/client" ) const ( - defaultResync = 10 * time.Minute + defaultResync = 10 * time.Minute + defaultWaitTime = 250 * time.Millisecond ) // Factory tracks various resource informers. 
@@ -142,8 +144,13 @@ func (f *Factory) waitForCacheSync(ns string) { return } - // we must block until all started informers' caches were synced - _ = fac.WaitForCacheSync(f.stopChan) + // Wait briefly for the caches to sync; if they are still not done after the grace period, bail out. + c := make(chan struct{}) + go func(c chan struct{}) { + <-time.After(defaultWaitTime) + close(c) + }(c) + _ = fac.WaitForCacheSync(c) } // WaitForCacheSync waits for all factories to update their cache.