Skip to content

Commit

Permalink
dynamically check if a resource is scalable
Browse files Browse the repository at this point in the history
  • Loading branch information
wjiec committed Dec 1, 2024
1 parent ac11361 commit 07f9053
Show file tree
Hide file tree
Showing 6 changed files with 79 additions and 74 deletions.
86 changes: 37 additions & 49 deletions internal/dao/registry.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,17 +5,20 @@ package dao

import (
"fmt"
"slices"
"sort"
"strings"
"sync"

"github.com/derailed/k9s/internal/client"
"github.com/rs/zerolog/log"
apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"

"github.com/derailed/k9s/internal/client"
)

const (
Expand Down Expand Up @@ -95,10 +98,7 @@ func AccessorFor(f Factory, gvr client.GVR) (Accessor, error) {

r, ok := m[gvr]
if !ok {
r = new(Generic)
if MetaAccess.IsScalable(gvr) {
r = new(Scaler)
}
r = new(Scaler)
log.Debug().Msgf("No DAO registry entry for %q. Using generics!", gvr)
}
r.Init(f, gvr)
Expand Down Expand Up @@ -144,12 +144,7 @@ func (m *Meta) GVK2GVR(gv schema.GroupVersion, kind string) (client.GVR, bool, b

// IsCRD checks if a resource represents a CRD
func IsCRD(r metav1.APIResource) bool {
for _, c := range r.Categories {
if c == crdCat {
return true
}
}
return false
return slices.Contains(r.Categories, crdCat)
}

// MetaFor returns a resource metadata for a given gvr.
Expand All @@ -166,24 +161,19 @@ func (m *Meta) MetaFor(gvr client.GVR) (metav1.APIResource, error) {

// IsK8sMeta checks for non-resource meta.
func IsK8sMeta(m metav1.APIResource) bool {
for _, c := range m.Categories {
if c == k9sCat || c == helmCat {
return false
}
}

return true
return !slices.ContainsFunc(m.Categories, func(category string) bool {
return category == k9sCat || category == helmCat
})
}

// IsK9sMeta checks for non-resource meta.
func IsK9sMeta(m metav1.APIResource) bool {
for _, c := range m.Categories {
if c == k9sCat {
return true
}
}
return slices.Contains(m.Categories, k9sCat)
}

return false
// IsScalable checks if the resource can be scaled
func IsScalable(m metav1.APIResource) bool {
return slices.Contains(m.Categories, scaleCat)
}

// LoadResources hydrates server preferred+CRDs resource metadata.
Expand All @@ -196,22 +186,12 @@ func (m *Meta) LoadResources(f Factory) error {
return err
}
loadNonResource(m.resMetas)
loadCRDs(f, m.resMetas)

return nil
}

// IsScalable checks if the resource can be scaled
func (m *Meta) IsScalable(gvr client.GVR) bool {
if meta, ok := m.resMetas[gvr]; ok {
for _, c := range meta.Categories {
if c == scaleCat {
return true
}
}
}
// We've actually loaded all the CRDs in loadPreferred, and we're now adding
// some additional CRD properties on top of that.
go m.loadCRDs(f)

return false
return nil
}

// BOZO!! Need countermeasures for direct commands!
Expand Down Expand Up @@ -419,11 +399,16 @@ func isDeprecated(gvr client.GVR) bool {
return ok
}

func loadCRDs(f Factory, m ResourceMetas) {
// loadCRDs waits for the cache to be synced and then adds some additional properties to CRDs.
func (m *Meta) loadCRDs(f Factory) {
if f.Client() == nil || !f.Client().ConnectionOK() {
return
}
oo, err := f.List(crdGVR, client.ClusterScope, false, labels.Everything())

// we must block until all CRD caches are synced
f.WaitForCacheSync()

oo, err := f.List(crdGVR, client.ClusterScope, true, labels.Everything())
if err != nil {
log.Warn().Err(err).Msgf("Fail CRDs load")
return
Expand All @@ -440,25 +425,28 @@ func loadCRDs(f Factory, m ResourceMetas) {
var meta metav1.APIResource
meta.Kind = crd.Spec.Names.Kind
meta.Group = crd.Spec.Group
meta.Name = crd.Name
meta.Name = crd.Spec.Names.Plural
meta.SingularName = crd.Spec.Names.Singular
meta.ShortNames = crd.Spec.Names.ShortNames
meta.Namespaced = crd.Spec.Scope == apiext.NamespaceScoped
for _, v := range crd.Spec.Versions {
if v.Served && !v.Deprecated {
if !slices.Contains(meta.Categories, scaleCat) {
meta.Categories = append(meta.Categories, scaleCat)
}

meta.Version = v.Name
break
}
}

// meta, errs := extractMeta(o)
// if len(errs) > 0 {
// log.Error().Err(errs[0]).Msgf("Fail to extract CRD meta (%d) errors", len(errs))
// continue
// }
meta.Categories = append(meta.Categories, crdCat)
gvr := client.NewGVRFromMeta(meta)
m[gvr] = meta
if !slices.Contains(meta.Categories, crdCat) {
meta.Categories = append(meta.Categories, crdCat)
}

m.mx.Lock()
m.resMetas[client.NewGVRFromMeta(meta)] = meta
m.mx.Unlock()
}
}

Expand Down
6 changes: 5 additions & 1 deletion internal/dao/scalable.go
Original file line number Diff line number Diff line change
@@ -1,14 +1,18 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of K9s

package dao

import (
"context"

"github.com/derailed/k9s/internal/client"
"github.com/rs/zerolog/log"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/restmapper"
"k8s.io/client-go/scale"

"github.com/derailed/k9s/internal/client"
)

var _ Scalable = (*Scaler)(nil)
Expand Down
5 changes: 3 additions & 2 deletions internal/dao/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,14 +8,15 @@ import (
"io"
"time"

"github.com/derailed/k9s/internal/client"
"github.com/derailed/k9s/internal/watch"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
restclient "k8s.io/client-go/rest"

"github.com/derailed/k9s/internal/client"
"github.com/derailed/k9s/internal/watch"
)

// ResourceMetas represents a collection of resource metadata.
Expand Down
10 changes: 3 additions & 7 deletions internal/view/command.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,11 +11,12 @@ import (
"strings"
"sync"

"github.com/rs/zerolog/log"

"github.com/derailed/k9s/internal/client"
"github.com/derailed/k9s/internal/dao"
"github.com/derailed/k9s/internal/model"
"github.com/derailed/k9s/internal/view/cmd"
"github.com/rs/zerolog/log"
)

var (
Expand Down Expand Up @@ -288,12 +289,7 @@ func (c *Command) viewMetaFor(p *cmd.Interpreter) (client.GVR, *MetaViewer, erro

v := MetaViewer{
viewerFn: func(gvr client.GVR) ResourceViewer {
viewer := NewOwnerExtender(NewBrowser(gvr))
if dao.MetaAccess.IsScalable(gvr) {
viewer = NewScaleExtender(viewer)
}

return viewer
return NewScaleExtender(NewOwnerExtender(NewBrowser(gvr)))
},
}
if mv, ok := customViewers[gvr]; ok {
Expand Down
31 changes: 20 additions & 11 deletions internal/view/scale_extender.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,13 +9,13 @@ import (
"strconv"
"strings"

"github.com/derailed/k9s/internal/config"

"github.com/derailed/k9s/internal/dao"
"github.com/derailed/k9s/internal/ui"
"github.com/derailed/tcell/v2"
"github.com/derailed/tview"
"github.com/rs/zerolog/log"

"github.com/derailed/k9s/internal/config"
"github.com/derailed/k9s/internal/dao"
"github.com/derailed/k9s/internal/ui"
)

// ScaleExtender adds scaling extensions.
Expand All @@ -35,12 +35,21 @@ func (s *ScaleExtender) bindKeys(aa *ui.KeyActions) {
if s.App().Config.K9s.IsReadOnly() {
return
}
aa.Add(ui.KeyS, ui.NewKeyActionWithOpts("Scale", s.scaleCmd,
ui.ActionOpts{
Visible: true,
Dangerous: true,
},
))

meta, err := dao.MetaAccess.MetaFor(s.GVR())
if err != nil {
log.Error().Err(err).Msgf("Unable to retrieve meta information for %s", s.GVR())
return
}

if dao.IsK9sMeta(meta) || dao.IsScalable(meta) {
aa.Add(ui.KeyS, ui.NewKeyActionWithOpts("Scale", s.scaleCmd,
ui.ActionOpts{
Visible: true,
Dangerous: true,
},
))
}
}

func (s *ScaleExtender) scaleCmd(evt *tcell.EventKey) *tcell.EventKey {
Expand Down Expand Up @@ -127,7 +136,7 @@ func (s *ScaleExtender) makeScaleForm(sels []string) (*tview.Form, error) {
if len(sels) == 1 {
// If the CRD resource supports scaling, then first try to
// read the replicas directly from the CRD.
if dao.MetaAccess.IsScalable(s.GVR()) {
if meta, _ := dao.MetaAccess.MetaFor(s.GVR()); dao.IsScalable(meta) {
replicas, err := s.replicasFromScaleSubresource(sels[0])
if err == nil && len(replicas) != 0 {
factor = replicas
Expand Down
15 changes: 11 additions & 4 deletions internal/watch/factory.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,18 +9,20 @@ import (
"sync"
"time"

"github.com/derailed/k9s/internal/client"
"github.com/rs/zerolog/log"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
di "k8s.io/client-go/dynamic/dynamicinformer"
"k8s.io/client-go/informers"

"github.com/derailed/k9s/internal/client"
)

const (
defaultResync = 10 * time.Minute
defaultResync = 10 * time.Minute
defaultWaitTime = 250 * time.Millisecond
)

// Factory tracks various resource informers.
Expand Down Expand Up @@ -142,8 +144,13 @@ func (f *Factory) waitForCacheSync(ns string) {
return
}

// we must block until all started informers' caches were synced
_ = fac.WaitForCacheSync(f.stopChan)
// Hang for a sec for the cache to refresh; if it is still not done, bail out!
c := make(chan struct{})
go func(c chan struct{}) {
<-time.After(defaultWaitTime)
close(c)
}(c)
_ = fac.WaitForCacheSync(c)
}

// WaitForCacheSync waits for all factories to update their cache.
Expand Down

0 comments on commit 07f9053

Please sign in to comment.