mirror of
https://github.com/komodorio/helm-dashboard.git
synced 2026-03-24 11:48:04 +00:00
* Object model with self-sufficient binary (#131) * Code cosmetics * Experimenting with object model and direct HELM usage * Experiment with object model * replacing the kubectl * Progressing * Save the progress * Able to start with migration in mind * Migrated two pieces * List releases via Helm * Forgotten field * Cristallized the problem of ctx switcher * Reworked to multi-context * Rollback is also new style * More migration * Refactoring * Describe via code * Bye-bye kubectl binary * Eliminate more old code * Refactor a bit * Merges * No binaries in dockerfile * Commit * Progress with getting the data * Learned the thing about get * One field less * Sstart with repos * Repo add * repo remove * Repos! Icons! * Simplified access to data * Ver listing works * Ver check works * Caching and values * fixup * Done with repos * Working on install * Install work-ish * Fix UI failing on install * Upgrade flow works * Fix image building * Remove outdated test file * Move files around * REfactorings * Cosmetics * Test for cache control (#151) * Files import formatted * Added go-test tools * Added test for no-cache header * added changes * test for cache behaviour of app * test for static route (#153) * Tests: route configuration & context setter (#154) * Test for route configuration * Test for context setter middleware * implemented changes * Restore coverage profile Fixes #156 * Cosmetics * Test for `NewRouter` function (#157) * Test for `configureScanners` (#158) * Test for `configureKubectls` (#163) * Test for repository loading (#169) - Created `repos_test.go` - Test: `Load()` of Repositories * Build all PRs * Fixes failing test (#171) * Fixes failing test - Fixes failing test of repo loading * handles error for * Did some changes * Test for listing of repos (#173) - and did some code formatting Signed-off-by: OmAxiani0 <aximaniom@gmail.com> Signed-off-by: OmAxiani0 <aximaniom@gmail.com> * Test for adding repo (#175) - Modified the `repositories.yml` file 
Signed-off-by: OmAxiani0 <aximaniom@gmail.com> Signed-off-by: OmAxiani0 <aximaniom@gmail.com> * Test for deleting the repository (#176) * Test for deleting the repository - Also added cleanup function for `TestAdd` * Fixes failing test * Add auto labeler for PR's (#174) * Add auto labeler for PR's * Add all file under .github/workflow to 'ci' label Co-authored-by: Harshit Mehta <harshitm@nvidia.com> * Test for getting repository (#177) * Add github workflow for auto PR labeling (#181) Co-authored-by: Harshit Mehta <harshitm@nvidia.com> * Stub compilation * Fixes around installing * More complex test * Using object model to execute helm test (#191) * Expand test * More test * Coverage * Add mutex for operations * Rectore cluster detection code * Change receiver to pointer * Support multiple namespaces * Cosmetics * Update repos periodically * fix tests * Fix error display * Allow reconfiguring chart without repo * mute linter * Cosmetics * Failing approach to parse manifests Relates to #30 * Report the error properly * ✅ Add test for dashboard/objects/data.go NewDataLayer (#199) * Fix problem of wrong namespace * Added unit tests for releases (#204) * Rework API routes (#197) * Bootstrap OpenAPI doc * Renaming some routes * Listing namespaces * k8s part of things * Repositories section * Document scanners API * One more API call * Progress * Reworked install flow * History endpoint * Textual info section * Resources endpoint * Rollback endpoint * Rollback endpoint * Unit tests * Cleanup * Forgotten tags * Fix tests * TODOs * Rework manifest scanning * add hasTests flag * Adding more information on UI for helm test API response (#195) * Hide test button when no tests Fixes #115 Improves #195 --------- Signed-off-by: OmAxiani0 <aximaniom@gmail.com> Co-authored-by: Om Aximani <75031769+OmAximani0@users.noreply.github.com> Co-authored-by: Harshit Mehta <hdm23061993@gmail.com> Co-authored-by: Harshit Mehta <harshitm@nvidia.com> Co-authored-by: Todd Turner 
<todd@toddtee.sh> Co-authored-by: arvindsundararajan98 <109727359+arvindsundararajan98@users.noreply.github.com>
233 lines
5.0 KiB
Go
233 lines
5.0 KiB
Go
package objects
|
|
|
|
import (
|
|
"bytes"
|
|
"context"
|
|
"encoding/json"
|
|
"sync"
|
|
"time"
|
|
|
|
"github.com/joomcode/errorx"
|
|
"github.com/komodorio/helm-dashboard/pkg/dashboard/subproc"
|
|
"github.com/pkg/errors"
|
|
log "github.com/sirupsen/logrus"
|
|
"gopkg.in/yaml.v3"
|
|
"helm.sh/helm/v3/pkg/action"
|
|
"helm.sh/helm/v3/pkg/cli"
|
|
"helm.sh/helm/v3/pkg/release"
|
|
"io"
|
|
v1 "k8s.io/apimachinery/pkg/apis/testapigroup/v1"
|
|
"k8s.io/client-go/tools/clientcmd"
|
|
)
|
|
|
|
// DataLayer is the central access point to Helm and Kubernetes data for the
// dashboard. It lazily creates and caches one Application object per
// kube-context (see AppForCtx), guarded by appPerContextMx.
type DataLayer struct {
	KubeContext string             // currently selected kubeconfig context name
	Scanners    []subproc.Scanner  // configured resource scanners
	StatusInfo  *StatusInfo        // mutable status/version info reported to the UI
	Namespaces  []string           // namespaces the dashboard is restricted to (empty = all)
	Cache       *Cache             // shared cache; Clear()-ed on context switch

	ConfGen         HelmConfigGetter        // factory for helm action.Configuration per namespace
	appPerContext   map[string]*Application // lazily built Application per kube-context
	appPerContextMx *sync.Mutex             // protects appPerContext
}
|
|
|
|
// StatusInfo carries runtime status reported by GetStatus: version info,
// analytics opt-in, cache efficiency and the deployment mode.
type StatusInfo struct {
	CurVer        string  // version of this running binary
	LatestVer     string  // latest known released version (filled elsewhere)
	Analytics     bool    // whether analytics reporting is enabled
	CacheHitRatio float64 // hits / (hits + misses); recomputed on each GetStatus call
	ClusterMode   bool    // true when running inside a cluster (no kubeconfig contexts)
}
|
|
|
|
func NewDataLayer(ns []string, ver string, cg HelmConfigGetter) (*DataLayer, error) {
|
|
if cg == nil {
|
|
return nil, errors.New("HelmConfigGetter can't be nil")
|
|
}
|
|
|
|
return &DataLayer{
|
|
Namespaces: ns,
|
|
Cache: NewCache(),
|
|
StatusInfo: &StatusInfo{
|
|
CurVer: ver,
|
|
Analytics: false,
|
|
},
|
|
|
|
ConfGen: cg,
|
|
appPerContext: map[string]*Application{},
|
|
appPerContextMx: new(sync.Mutex),
|
|
}, nil
|
|
}
|
|
|
|
func (d *DataLayer) ListContexts() ([]KubeContext, error) {
|
|
res := []KubeContext{}
|
|
|
|
if d.StatusInfo.ClusterMode {
|
|
return res, nil
|
|
}
|
|
|
|
cfg, err := clientcmd.NewDefaultPathOptions().GetStartingConfig()
|
|
if err != nil {
|
|
return nil, errorx.Decorate(err, "failed to get kubectl config")
|
|
}
|
|
|
|
for name, ctx := range cfg.Contexts {
|
|
res = append(res, KubeContext{
|
|
IsCurrent: cfg.CurrentContext == name,
|
|
Name: name,
|
|
Cluster: ctx.Cluster,
|
|
AuthInfo: ctx.AuthInfo,
|
|
Namespace: ctx.Namespace,
|
|
})
|
|
}
|
|
|
|
return res, nil
|
|
}
|
|
|
|
func (d *DataLayer) GetStatus() *StatusInfo {
|
|
sum := float64(d.Cache.HitCount + d.Cache.MissCount)
|
|
if sum > 0 {
|
|
d.StatusInfo.CacheHitRatio = float64(d.Cache.HitCount) / sum
|
|
} else {
|
|
d.StatusInfo.CacheHitRatio = 0
|
|
}
|
|
return d.StatusInfo
|
|
}
|
|
|
|
// SectionFn produces a textual section for a given release. The bool flag
// selects an alternate rendering mode — presumably raw vs. processed output;
// TODO confirm its exact meaning at the call sites.
type SectionFn = func(*release.Release, bool) (string, error)
|
|
|
|
func ParseManifests(out string) ([]*v1.Carp, error) {
|
|
dec := yaml.NewDecoder(bytes.NewReader([]byte(out)))
|
|
|
|
res := make([]*v1.Carp, 0)
|
|
var tmp interface{}
|
|
for {
|
|
err := dec.Decode(&tmp)
|
|
if err == io.EOF {
|
|
break
|
|
}
|
|
|
|
if err != nil {
|
|
return nil, errorx.Decorate(err, "failed to parse manifest document #%d", len(res)+1)
|
|
}
|
|
|
|
// k8s libs uses only JSON tags defined, say hello to https://github.com/go-yaml/yaml/issues/424
|
|
// we can juggle it
|
|
jsoned, err := json.Marshal(tmp)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
var doc v1.Carp
|
|
err = json.Unmarshal(jsoned, &doc)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
if doc.Kind == "" {
|
|
log.Warnf("Manifest piece is not k8s resource: %s", jsoned)
|
|
continue
|
|
}
|
|
|
|
res = append(res, &doc)
|
|
}
|
|
return res, nil
|
|
}
|
|
|
|
func (d *DataLayer) SetContext(ctx string) error {
|
|
if d.KubeContext != ctx {
|
|
err := d.Cache.Clear()
|
|
if err != nil {
|
|
return errors.Wrap(err, "failed to set context")
|
|
}
|
|
}
|
|
|
|
d.KubeContext = ctx
|
|
|
|
return nil
|
|
}
|
|
|
|
func (d *DataLayer) AppForCtx(ctx string) (*Application, error) {
|
|
d.appPerContextMx.Lock()
|
|
defer d.appPerContextMx.Unlock()
|
|
|
|
app, ok := d.appPerContext[ctx]
|
|
if !ok {
|
|
settings := cli.New()
|
|
settings.KubeContext = ctx
|
|
|
|
settings.SetNamespace(d.nsForCtx(ctx))
|
|
|
|
cfgGetter := func(ns string) (*action.Configuration, error) {
|
|
return d.ConfGen(settings, ns)
|
|
}
|
|
|
|
a, err := NewApplication(settings, cfgGetter, d.Namespaces)
|
|
if err != nil {
|
|
return nil, errorx.Decorate(err, "Failed to create application for context '%s'", ctx)
|
|
}
|
|
|
|
app = a
|
|
d.appPerContext[ctx] = app
|
|
}
|
|
return app, nil
|
|
}
|
|
|
|
func (d *DataLayer) nsForCtx(ctx string) string {
|
|
lst, err := d.ListContexts()
|
|
if err != nil {
|
|
log.Debugf("Failed to get contexts for NS lookup: %+v", err)
|
|
}
|
|
for _, c := range lst {
|
|
if c.Name == ctx {
|
|
return c.Namespace
|
|
}
|
|
}
|
|
log.Debugf("Strange: no context found for '%s'", ctx)
|
|
return ""
|
|
}
|
|
|
|
// PeriodicTasks starts background maintenance goroutines, currently only the
// repo auto-update loop. It is a no-op outside cluster mode; ctx cancellation
// stops the started goroutines.
func (d *DataLayer) PeriodicTasks(ctx context.Context) {
	if !d.StatusInfo.ClusterMode { // TODO: maybe have a separate flag for that?
		log.Debugf("Not in cluster mode, not starting background tasks")
		return
	}

	// auto-update repos
	go d.loopUpdateRepos(ctx, 10*time.Minute) // TODO: parameterize interval?

	// auto-scan
}
|
|
|
|
func (d *DataLayer) loopUpdateRepos(ctx context.Context, interval time.Duration) {
|
|
ticker := time.NewTicker(interval)
|
|
for {
|
|
app, err := d.AppForCtx("")
|
|
if err != nil {
|
|
log.Warnf("Failed to get app object while in background repo update: %v", err)
|
|
break // no point in retrying
|
|
} else {
|
|
repos, err := app.Repositories.List()
|
|
if err != nil {
|
|
log.Warnf("Failed to get list of repos while in background update: %v", err)
|
|
}
|
|
|
|
for _, repo := range repos {
|
|
err := repo.Update()
|
|
if err != nil {
|
|
log.Warnf("Failed to update repo %s: %v", repo.Orig.Name, err)
|
|
}
|
|
}
|
|
}
|
|
|
|
select {
|
|
case <-ctx.Done():
|
|
ticker.Stop()
|
|
return
|
|
case <-ticker.C:
|
|
continue
|
|
}
|
|
}
|
|
log.Debugf("Update repo loop done.")
|
|
}
|